refactor: Major reorganization of optimization_engine module structure
BREAKING CHANGE: Module paths have been reorganized for better maintainability. Backwards compatibility aliases with deprecation warnings are provided. New Structure: - core/ - Optimization runners (runner, intelligent_optimizer, etc.) - processors/ - Data processing - surrogates/ - Neural network surrogates - nx/ - NX/Nastran integration (solver, updater, session_manager) - study/ - Study management (creator, wizard, state, reset) - reporting/ - Reports and analysis (visualizer, report_generator) - config/ - Configuration management (manager, builder) - utils/ - Utilities (logger, auto_doc, etc.) - future/ - Research/experimental code Migration: - ~200 import changes across 125 files - All __init__.py files use lazy loading to avoid circular imports - Backwards compatibility layer supports old import paths with warnings - All existing functionality preserved To migrate existing code: OLD: from optimization_engine.nx_solver import NXSolver NEW: from optimization_engine.nx.solver import NXSolver OLD: from optimization_engine.runner import OptimizationRunner NEW: from optimization_engine.core.runner import OptimizationRunner 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
43
optimization_engine/config/__init__.py
Normal file
43
optimization_engine/config/__init__.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
Configuration Management
|
||||
========================
|
||||
|
||||
Configuration loading, validation, and building.
|
||||
|
||||
Modules:
|
||||
- manager: ConfigManager for loading/saving configs
|
||||
- builder: OptimizationConfigBuilder for creating configs
|
||||
- setup_wizard: Interactive configuration setup
|
||||
- capability_matcher: Match capabilities to requirements
|
||||
"""
|
||||
|
||||
# Lazy imports to avoid circular dependencies
def __getattr__(name):
    """Resolve public names on first access (PEP 562 module __getattr__).

    Submodules are imported only when one of their exported names is
    requested, which keeps ``import optimization_engine.config`` cheap
    and sidesteps circular imports between the config modules.
    """
    if name in ('ConfigManager', 'ConfigValidationError'):
        from . import manager as _module
    elif name == 'OptimizationConfigBuilder':
        from . import builder as _module
    elif name == 'SetupWizard':
        from . import setup_wizard as _module
    elif name == 'CapabilityMatcher':
        from . import capability_matcher as _module
    elif name == 'TemplateLoader':
        from . import template_loader as _module
    else:
        raise AttributeError(f"module 'optimization_engine.config' has no attribute '{name}'")
    return getattr(_module, name)


# Public API of the package; each name is materialized lazily above.
__all__ = [
    'ConfigManager',
    'ConfigValidationError',
    'OptimizationConfigBuilder',
    'SetupWizard',
    'CapabilityMatcher',
    'TemplateLoader',
]
|
||||
403
optimization_engine/config/builder.py
Normal file
403
optimization_engine/config/builder.py
Normal file
@@ -0,0 +1,403 @@
|
||||
"""
|
||||
Optimization Configuration Builder
|
||||
|
||||
Helps users build multi-objective optimization configurations by:
|
||||
1. Discovering available design variables from FEA model
|
||||
2. Listing available objectives and constraints
|
||||
3. Creating structured optimization_config.json
|
||||
|
||||
Supports:
|
||||
- Multi-objective optimization (minimize weight + stress simultaneously)
|
||||
- Constraints (max displacement, stress limits, mass limits)
|
||||
- User selection of which objectives/constraints to apply
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List
|
||||
import json
|
||||
|
||||
|
||||
class OptimizationConfigBuilder:
    """
    Interactive builder for optimization configurations.

    Workflow:
    1. Discover model capabilities (design variables, analysis type)
    2. Present available objectives/constraints to user
    3. Build configuration based on user selections
    """

    # Available objectives that can be extracted from OP2 files
    AVAILABLE_OBJECTIVES = {
        'minimize_mass': {
            'description': 'Minimize total mass (weight reduction)',
            'extractor': 'mass_extractor',
            'metric': 'total_mass',
            'units': 'kg',
            'direction': 'minimize',
            'typical_weight': 5.0  # Higher priority in multi-objective
        },
        'minimize_max_stress': {
            'description': 'Minimize maximum von Mises stress',
            'extractor': 'stress_extractor',
            'metric': 'max_von_mises',
            'units': 'MPa',
            'direction': 'minimize',
            'typical_weight': 10.0  # Very important - failure prevention
        },
        'minimize_max_displacement': {
            'description': 'Minimize maximum displacement (increase stiffness)',
            'extractor': 'displacement_extractor',
            'metric': 'max_displacement',
            'units': 'mm',
            'direction': 'minimize',
            'typical_weight': 3.0
        },
        'minimize_volume': {
            'description': 'Minimize total volume (material usage)',
            'extractor': 'volume_extractor',
            'metric': 'total_volume',
            'units': 'mm^3',
            'direction': 'minimize',
            'typical_weight': 4.0
        }
    }

    # Available constraints
    AVAILABLE_CONSTRAINTS = {
        'max_stress_limit': {
            'description': 'Maximum allowable von Mises stress',
            'extractor': 'stress_extractor',
            'metric': 'max_von_mises',
            'units': 'MPa',
            'typical_value': 200.0,  # Below yield strength with safety factor
            'constraint_type': 'upper_bound'
        },
        'max_displacement_limit': {
            'description': 'Maximum allowable displacement',
            'extractor': 'displacement_extractor',
            'metric': 'max_displacement',
            'units': 'mm',
            'typical_value': 1.0,  # Stiffness requirement
            'constraint_type': 'upper_bound'
        },
        'min_mass_limit': {
            'description': 'Minimum required mass (structural integrity)',
            'extractor': 'mass_extractor',
            'metric': 'total_mass',
            'units': 'kg',
            'typical_value': 0.3,
            'constraint_type': 'lower_bound'
        },
        'max_mass_limit': {
            'description': 'Maximum allowable mass (weight budget)',
            'extractor': 'mass_extractor',
            'metric': 'total_mass',
            'units': 'kg',
            'typical_value': 0.5,
            'constraint_type': 'upper_bound'
        }
    }

    def __init__(self, model_discovery_result: Dict[str, Any]):
        """
        Initialize with model discovery results.

        Args:
            model_discovery_result: Output from discover_fea_model()
        """
        self.model_info = model_discovery_result
        # Configuration skeleton; populated via the add_* methods.
        self.config = {
            'design_variables': [],
            'objectives': [],
            'constraints': [],
            'optimization_settings': {
                'n_trials': 100,
                'sampler': 'TPE',
                'n_startup_trials': 20
            }
        }

    def list_available_design_variables(self) -> List[Dict[str, Any]]:
        """
        List all available design variables from model.

        Returns:
            List of design variable options (name, current value, units,
            type and suggested optimization bounds).
        """
        if 'expressions' not in self.model_info:
            return []

        design_vars = []
        for expr in self.model_info['expressions']:
            if expr['value'] is not None:  # Only variables with known values
                design_vars.append({
                    'name': expr['name'],
                    'current_value': expr['value'],
                    # .get for consistency with add_design_variable():
                    # expressions are not guaranteed to carry 'units'.
                    'units': expr.get('units', ''),
                    'type': expr.get('type', 'Unknown'),
                    'suggested_bounds': self._suggest_bounds(expr)
                })

        return design_vars

    def _suggest_bounds(self, expr: Dict[str, Any]) -> tuple:
        """
        Suggest reasonable optimization bounds for a design variable.

        Args:
            expr: Expression dictionary

        Returns:
            (lower_bound, upper_bound), always ordered lower <= upper.
        """
        value = expr['value']
        expr_type = expr.get('type', '').lower()

        if 'angle' in expr_type or 'degrees' in expr.get('units', '').lower():
            # Angles: +/-15 degrees, clamped to [0, 180]
            return (max(0, value - 15), min(180, value + 15))

        if 'thickness' in expr['name'].lower() or 'dimension' in expr_type:
            lo, hi = value * 0.7, value * 1.3    # Dimensions: +/-30%
        elif 'radius' in expr['name'].lower() or 'diameter' in expr['name'].lower():
            lo, hi = value * 0.75, value * 1.25  # Radii/diameters: +/-25%
        else:
            lo, hi = value * 0.8, value * 1.2    # Default: +/-20%

        # Multiplicative bounds invert for negative values; keep lo <= hi.
        return (min(lo, hi), max(lo, hi))

    def list_available_objectives(self) -> Dict[str, Dict[str, Any]]:
        """
        List all available optimization objectives.

        Returns:
            Dictionary of objective options (shallow copy; safe to mutate keys).
        """
        return self.AVAILABLE_OBJECTIVES.copy()

    def list_available_constraints(self) -> Dict[str, Dict[str, Any]]:
        """
        List all available constraints.

        Returns:
            Dictionary of constraint options (shallow copy; safe to mutate keys).
        """
        return self.AVAILABLE_CONSTRAINTS.copy()

    def add_design_variable(self, name: str, lower_bound: float, upper_bound: float):
        """
        Add a design variable to the configuration.

        Args:
            name: Expression name from model
            lower_bound: Minimum value
            upper_bound: Maximum value

        Raises:
            ValueError: If the named expression does not exist in the model.
        """
        # Verify variable exists in model
        expr = next((e for e in self.model_info['expressions'] if e['name'] == name), None)
        if not expr:
            raise ValueError(f"Design variable '{name}' not found in model")

        self.config['design_variables'].append({
            'name': name,
            'type': 'continuous',
            'bounds': [lower_bound, upper_bound],
            'units': expr.get('units', ''),
            'initial_value': expr['value']
        })

    def add_objective(self, objective_key: str, weight: float = None, target: float = None):
        """
        Add an objective to the configuration.

        Args:
            objective_key: Key from AVAILABLE_OBJECTIVES
            weight: Importance weight (for multi-objective); defaults to the
                objective's typical_weight when omitted
            target: Target value (optional, for goal programming)

        Raises:
            ValueError: If objective_key is not a known objective.
        """
        if objective_key not in self.AVAILABLE_OBJECTIVES:
            raise ValueError(f"Unknown objective: {objective_key}")

        obj_info = self.AVAILABLE_OBJECTIVES[objective_key]

        objective = {
            'name': objective_key,
            'description': obj_info['description'],
            'extractor': obj_info['extractor'],
            'metric': obj_info['metric'],
            'direction': obj_info['direction'],
            # Compare against None (not truthiness) so an explicit
            # weight of 0.0 is honored instead of silently replaced.
            'weight': obj_info['typical_weight'] if weight is None else weight
        }

        if target is not None:
            objective['target'] = target

        self.config['objectives'].append(objective)

    def add_constraint(self, constraint_key: str, limit_value: float):
        """
        Add a constraint to the configuration.

        Args:
            constraint_key: Key from AVAILABLE_CONSTRAINTS
            limit_value: Constraint limit value

        Raises:
            ValueError: If constraint_key is not a known constraint.
        """
        if constraint_key not in self.AVAILABLE_CONSTRAINTS:
            raise ValueError(f"Unknown constraint: {constraint_key}")

        const_info = self.AVAILABLE_CONSTRAINTS[constraint_key]

        constraint = {
            'name': constraint_key,
            'description': const_info['description'],
            'extractor': const_info['extractor'],
            'metric': const_info['metric'],
            'type': const_info['constraint_type'],
            'limit': limit_value,
            'units': const_info['units']
        }

        self.config['constraints'].append(constraint)

    def set_optimization_settings(self, n_trials: int = None, sampler: str = None):
        """
        Configure optimization algorithm settings.

        Args:
            n_trials: Number of optimization iterations
            sampler: 'TPE', 'CMAES', 'GP', etc.
        """
        # Explicit None checks so falsy-but-intentional values are not dropped.
        if n_trials is not None:
            self.config['optimization_settings']['n_trials'] = n_trials
        if sampler is not None:
            self.config['optimization_settings']['sampler'] = sampler

    def build(self) -> Dict[str, Any]:
        """
        Build and validate the configuration.

        Returns:
            Complete optimization configuration

        Raises:
            ValueError: If no design variables or no objectives were added.
        """
        # Validation
        if not self.config['design_variables']:
            raise ValueError("At least one design variable is required")

        if not self.config['objectives']:
            raise ValueError("At least one objective is required")

        # Add metadata
        self.config['model_info'] = {
            'sim_file': self.model_info.get('sim_file', ''),
            'solutions': self.model_info.get('solutions', [])
        }

        return self.config

    def save(self, output_path: Path):
        """
        Save configuration to JSON file.

        Args:
            output_path: Path to save configuration

        Raises:
            ValueError: Propagated from build() if the config is incomplete.
        """
        config = self.build()

        with open(output_path, 'w') as f:
            json.dump(config, f, indent=2)

        print(f"Configuration saved to: {output_path}")

    def print_summary(self):
        """Print a human-readable summary of the configuration."""
        print("\n" + "="*60)
        print("OPTIMIZATION CONFIGURATION SUMMARY")
        print("="*60)

        print(f"\nModel: {self.model_info.get('sim_file', 'Unknown')}")

        print(f"\nDesign Variables ({len(self.config['design_variables'])}):")
        for dv in self.config['design_variables']:
            print(f" • {dv['name']}: [{dv['bounds'][0]:.2f}, {dv['bounds'][1]:.2f}] {dv['units']}")

        print(f"\nObjectives ({len(self.config['objectives'])}):")
        for obj in self.config['objectives']:
            print(f" • {obj['description']} (weight: {obj['weight']:.1f})")

        print(f"\nConstraints ({len(self.config['constraints'])}):")
        for const in self.config['constraints']:
            # Render the bound direction as a comparison operator.
            operator = '<=' if const['type'] == 'upper_bound' else '>='
            print(f" • {const['description']}: {const['metric']} {operator} {const['limit']} {const['units']}")

        print(f"\nOptimization Settings:")
        print(f" • Trials: {self.config['optimization_settings']['n_trials']}")
        print(f" • Sampler: {self.config['optimization_settings']['sampler']}")

        print("="*60 + "\n")
|
||||
|
||||
|
||||
# Example usage: end-to-end demo of discovering a model and building a
# multi-objective configuration. Requires the NX model discovery machinery,
# so it only runs as a script, never on import.
if __name__ == "__main__":
    # NOTE(review): module paths were reorganized in this refactor — confirm
    # discover_fea_model's current location.
    from optimization_engine.model_discovery import discover_fea_model

    # Step 1: Discover model
    print("Step 1: Discovering FEA model...")
    model_result = discover_fea_model("tests/Bracket_sim1.sim")

    # Step 2: Create builder
    builder = OptimizationConfigBuilder(model_result)

    # Step 3: Show available options
    print("\n" + "="*60)
    print("AVAILABLE DESIGN VARIABLES:")
    print("="*60)
    for dv in builder.list_available_design_variables():
        print(f"\n• {dv['name']}")
        print(f" Current value: {dv['current_value']} {dv['units']}")
        print(f" Suggested bounds: {dv['suggested_bounds']}")

    print("\n" + "="*60)
    print("AVAILABLE OBJECTIVES:")
    print("="*60)
    for key, obj in builder.list_available_objectives().items():
        print(f"\n• {key}")
        print(f" Description: {obj['description']}")
        print(f" Default weight: {obj['typical_weight']}")

    print("\n" + "="*60)
    print("AVAILABLE CONSTRAINTS:")
    print("="*60)
    for key, const in builder.list_available_constraints().items():
        print(f"\n• {key}")
        print(f" Description: {const['description']}")
        print(f" Typical value: {const['typical_value']} {const['units']}")

    # Step 4: Build a multi-objective configuration
    print("\n" + "="*60)
    print("BUILDING CONFIGURATION:")
    print("="*60)

    # Add design variables
    builder.add_design_variable('tip_thickness', 15.0, 25.0)
    builder.add_design_variable('support_angle', 20.0, 40.0)
    builder.add_design_variable('support_blend_radius', 5.0, 15.0)

    # Add objectives: minimize weight AND minimize stress
    builder.add_objective('minimize_mass', weight=5.0)
    builder.add_objective('minimize_max_stress', weight=10.0)

    # Add constraints: max displacement < 1.0 mm, max stress < 200 MPa
    builder.add_constraint('max_displacement_limit', limit_value=1.0)
    builder.add_constraint('max_stress_limit', limit_value=200.0)

    # Set optimization settings
    builder.set_optimization_settings(n_trials=150, sampler='TPE')

    # Print summary
    builder.print_summary()

    # Save configuration
    builder.save(Path('optimization_config.json'))

    print("\nConfiguration ready for optimization!")
|
||||
336
optimization_engine/config/capability_matcher.py
Normal file
336
optimization_engine/config/capability_matcher.py
Normal file
@@ -0,0 +1,336 @@
|
||||
"""
|
||||
Capability Matcher
|
||||
|
||||
Matches required workflow steps to existing codebase capabilities and identifies
|
||||
actual knowledge gaps.
|
||||
|
||||
Author: Atomizer Development Team
|
||||
Version: 0.1.0 (Phase 2.5)
|
||||
Last Updated: 2025-01-16
|
||||
"""
|
||||
|
||||
from typing import Dict, List, Any, Optional
|
||||
from dataclasses import dataclass
|
||||
|
||||
from optimization_engine.future.workflow_decomposer import WorkflowStep
|
||||
from optimization_engine.utils.codebase_analyzer import CodebaseCapabilityAnalyzer
|
||||
|
||||
|
||||
@dataclass
class StepMatch:
    """Represents the match status of a workflow step.

    Attributes carry where (if anywhere) the step is implemented and how
    confident the matcher is that the step can be executed.
    """
    step: WorkflowStep
    is_known: bool
    # First implementation file for the capability, or None when unknown.
    implementation: Optional[str] = None
    # None (not an empty list) when no similar capabilities were recorded;
    # annotation fixed: the default is None, so the type must be Optional.
    similar_capabilities: Optional[List[str]] = None
    # 1.0 for known steps; partial credit when similar capabilities exist.
    confidence: float = 0.0
|
||||
|
||||
|
||||
@dataclass
class CapabilityMatch:
    """Complete matching result for a workflow."""
    # Steps whose required capability exists in the analyzed codebase.
    known_steps: List[StepMatch]
    # Steps with no direct implementation (may still carry similar capabilities).
    unknown_steps: List[StepMatch]
    # Mean per-step confidence over all steps (known steps contribute 1.0).
    overall_confidence: float
    coverage: float  # Percentage of steps that are known
||||
|
||||
|
||||
class CapabilityMatcher:
    """Matches required workflow steps to existing capabilities.

    Each WorkflowStep is classified as known (an implementation exists in
    the analyzed codebase) or unknown (a knowledge gap), with a per-step
    confidence score and pointers to similar capabilities when available.
    """

    def __init__(self, analyzer: Optional[CodebaseCapabilityAnalyzer] = None):
        """
        Args:
            analyzer: Capability analyzer to use; a fresh
                CodebaseCapabilityAnalyzer is created when omitted.
        """
        self.analyzer = analyzer or CodebaseCapabilityAnalyzer()
        self.capabilities = self.analyzer.analyze_codebase()

        # Mapping from workflow actions to (category, capability) checks
        self.action_to_capability = {
            'identify_parameters': ('geometry', 'expression_filtering'),
            'update_parameters': ('optimization', 'parameter_updating'),
            'read_expression': ('geometry', 'parameter_extraction'),  # Reading expressions from .prt
            'run_analysis': ('simulation', 'nx_solver'),
            'optimize': ('optimization', 'optuna_integration'),
            'create_material': ('materials', 'xml_generation'),
            'apply_loads': ('loads_bc', 'load_application'),
            'generate_mesh': ('mesh', 'mesh_generation')
        }

    # ------------------------------------------------------------------
    # Internal helpers (extracted from repeated inline patterns)
    # ------------------------------------------------------------------

    def _has_capability(self, category: str, name: str) -> bool:
        """True when the capability exists in the analysis and is available."""
        return bool(self.capabilities.get(category, {}).get(name))

    def _implementation_for(self, category: str, name: str) -> str:
        """First implementation file registered for a capability, or 'unknown'."""
        details = self.analyzer.get_capability_details(category, name)
        if details and details.get('implementation_files'):
            return details['implementation_files'][0]
        return 'unknown'

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def match(self, workflow_steps: List[WorkflowStep]) -> CapabilityMatch:
        """
        Match workflow steps to existing capabilities.

        Returns:
            CapabilityMatch holding the known/unknown step lists, coverage
            (fraction of steps known) and an overall confidence averaging
            per-step confidence over all steps (known steps count as 1.0).
        """
        known_steps = []
        unknown_steps = []

        for step in workflow_steps:
            result = self._match_step(step)
            (known_steps if result.is_known else unknown_steps).append(result)

        # Coverage: fraction of steps with a direct implementation.
        total_steps = len(workflow_steps)
        coverage = len(known_steps) / total_steps if total_steps > 0 else 0.0

        # Confidence: known steps contribute 1.0, unknown steps contribute
        # whatever partial confidence their similarity analysis produced.
        total_confidence = sum(m.confidence for m in known_steps + unknown_steps)
        overall_confidence = total_confidence / total_steps if total_steps > 0 else 0.0

        return CapabilityMatch(
            known_steps=known_steps,
            unknown_steps=unknown_steps,
            overall_confidence=overall_confidence,
            coverage=coverage
        )

    def _match_step(self, step: WorkflowStep) -> StepMatch:
        """Match a single workflow step to capabilities."""
        # Result extraction and simulation need specialised handling.
        if step.action == 'extract_result':
            return self._match_extraction_step(step)
        if step.action == 'run_analysis':
            return self._match_simulation_step(step)

        # General capability matching via the action table.
        if step.action in self.action_to_capability:
            category, capability_name = self.action_to_capability[step.action]
            if self._has_capability(category, capability_name):
                return StepMatch(
                    step=step,
                    is_known=True,
                    implementation=self._implementation_for(category, capability_name),
                    confidence=1.0
                )

        # Not found - check for similar capabilities
        similar = self._find_similar_capabilities(step)
        return StepMatch(
            step=step,
            is_known=False,
            similar_capabilities=similar,
            confidence=0.3 if similar else 0.0  # Some confidence if similar capabilities exist
        )

    def _match_extraction_step(self, step: WorkflowStep) -> StepMatch:
        """Special matching logic for result extraction steps."""
        result_type = step.params.get('result_type', '')

        if not result_type:
            return StepMatch(step=step, is_known=False, confidence=0.0)

        # Check if this extraction capability exists
        if self._has_capability('result_extraction', result_type):
            return StepMatch(
                step=step,
                is_known=True,
                implementation=self._implementation_for('result_extraction', result_type),
                confidence=1.0
            )

        # Not found - find similar extraction capabilities
        similar = self.analyzer.find_similar_capabilities(result_type, 'result_extraction')

        # For result extraction, if similar capabilities exist, confidence is
        # higher because the pattern is likely the same (just a different OP2
        # attribute).
        return StepMatch(
            step=step,
            is_known=False,
            similar_capabilities=similar,
            confidence=0.6 if similar else 0.0
        )

    def _match_simulation_step(self, step: WorkflowStep) -> StepMatch:
        """Special matching logic for simulation steps."""
        solver = step.params.get('solver', '')

        # Check if NX solver exists at all
        if self._has_capability('simulation', 'nx_solver'):
            impl = self._implementation_for('simulation', 'nx_solver')

            if self._has_capability('simulation', solver.lower()):
                # Specific solver supported
                return StepMatch(
                    step=step,
                    is_known=True,
                    implementation=impl,
                    confidence=1.0
                )

            # NX solver exists but specific solver type not verified.
            # Still treated as known because the solver is generic.
            return StepMatch(
                step=step,
                is_known=True,
                implementation=impl,
                confidence=0.9  # Slight uncertainty about specific solver
            )

        return StepMatch(step=step, is_known=False, confidence=0.0)

    def _find_similar_capabilities(self, step: WorkflowStep) -> List[str]:
        """Find existing capabilities whose name shares a word with the action."""
        similar = []

        # Only search within the step's own domain.
        if step.domain in self.capabilities:
            step_words = set(step.action.lower().split('_'))

            for cap_name, exists in self.capabilities[step.domain].items():
                if not exists:
                    continue
                # Any word overlap between action and capability => similar.
                if step_words & set(cap_name.lower().split('_')):
                    similar.append(cap_name)

        return similar

    def get_match_summary(self, match: CapabilityMatch) -> str:
        """Get human-readable summary of capability matching."""
        lines = [
            "Workflow Component Analysis",
            "=" * 80,
            ""
        ]

        if match.known_steps:
            lines.append(f"Known Capabilities ({len(match.known_steps)} of {len(match.known_steps) + len(match.unknown_steps)}):")
            lines.append("-" * 80)

            for i, step_match in enumerate(match.known_steps, 1):
                step = step_match.step
                lines.append(f"{i}. {step.action.replace('_', ' ').title()}")
                lines.append(f" Domain: {step.domain}")
                if step_match.implementation:
                    lines.append(f" Implementation: {step_match.implementation}")
                lines.append(f" Status: KNOWN")
                lines.append("")

        if match.unknown_steps:
            lines.append(f"Missing Capabilities ({len(match.unknown_steps)}):")
            lines.append("-" * 80)

            for i, step_match in enumerate(match.unknown_steps, 1):
                step = step_match.step
                lines.append(f"{i}. {step.action.replace('_', ' ').title()}")
                lines.append(f" Domain: {step.domain}")
                if step.params:
                    lines.append(f" Required: {step.params}")
                lines.append(f" Status: MISSING")

                if step_match.similar_capabilities:
                    lines.append(f" Similar capabilities found: {', '.join(step_match.similar_capabilities)}")
                    lines.append(f" Confidence: {step_match.confidence:.0%} (can adapt from similar)")
                else:
                    lines.append(f" Confidence: {step_match.confidence:.0%} (needs research)")
                lines.append("")

        lines.append("=" * 80)
        lines.append(f"Overall Coverage: {match.coverage:.0%}")
        lines.append(f"Overall Confidence: {match.overall_confidence:.0%}")
        lines.append("")

        return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """Test the capability matcher.

    Exercises the full pipeline: decompose a free-text optimization request
    into workflow steps, match them against codebase capabilities, and print
    a human-readable gap report with research suggestions.
    """
    # Local import: workflow_decomposer lives in future/ and is only needed
    # for this demo — presumably also avoids a circular import; TODO confirm.
    from optimization_engine.future.workflow_decomposer import WorkflowDecomposer

    print("Capability Matcher Test")
    print("=" * 80)
    print()

    # Initialize components
    analyzer = CodebaseCapabilityAnalyzer()
    decomposer = WorkflowDecomposer()
    matcher = CapabilityMatcher(analyzer)

    # Test with strain optimization request
    test_request = "I want to evaluate strain on a part with sol101 and optimize this (minimize) using iterations and optuna to lower it varying all my geometry parameters that contains v_ in its expression"

    print("Request:")
    print(test_request)
    print()

    # Decompose workflow
    print("Step 1: Decomposing workflow...")
    steps = decomposer.decompose(test_request)
    print(f" Identified {len(steps)} workflow steps")
    print()

    # Match to capabilities
    print("Step 2: Matching to existing capabilities...")
    match = matcher.match(steps)
    print()

    # Display results
    print(matcher.get_match_summary(match))

    # Show what needs to be researched
    if match.unknown_steps:
        print("\nResearch Needed:")
        print("-" * 80)
        for step_match in match.unknown_steps:
            step = step_match.step
            print(f" Topic: How to {step.action.replace('_', ' ')}")
            print(f" Domain: {step.domain}")

            if step_match.similar_capabilities:
                print(f" Strategy: Adapt from {step_match.similar_capabilities[0]}")
                print(f" (follow same pattern, different OP2 attribute)")
            else:
                print(f" Strategy: Research from scratch")
                print(f" (search docs, ask user for examples)")
            print()


if __name__ == '__main__':
    main()
|
||||
244
optimization_engine/config/manager.py
Normal file
244
optimization_engine/config/manager.py
Normal file
@@ -0,0 +1,244 @@
|
||||
"""Configuration validation and management for Atomizer studies.
|
||||
|
||||
This module provides schema-based validation for optimization configuration files,
|
||||
ensuring consistency across all studies.
|
||||
|
||||
Usage:
|
||||
# In run_optimization.py
|
||||
from optimization_engine.config.manager import ConfigManager
|
||||
|
||||
config_manager = ConfigManager(Path(__file__).parent / "1_setup" / "optimization_config.json")
|
||||
config_manager.load_config()
|
||||
|
||||
if not config_manager.validate():
|
||||
print(config_manager.get_validation_report())
|
||||
sys.exit(1)
|
||||
|
||||
# Access validated configuration
|
||||
design_vars = config_manager.get_design_variables()
|
||||
objectives = config_manager.get_objectives()
|
||||
"""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Optional
|
||||
|
||||
# Optional dependency: jsonschema enables JSON-Schema validation of configs.
# When missing, ConfigManager.validate() degrades (schema checks skipped).
try:
    import jsonschema
    JSONSCHEMA_AVAILABLE = True
except ImportError:
    JSONSCHEMA_AVAILABLE = False
    print("Warning: jsonschema not installed. Install with: pip install jsonschema>=4.17.0")
|
||||
|
||||
|
||||
class ConfigValidationError(Exception):
    """Raised when configuration validation fails."""
|
||||
|
||||
|
||||
class ConfigManager:
|
||||
"""Manages and validates optimization configuration files."""
|
||||
|
||||
    def __init__(self, config_path: Path):
        """
        Initialize ConfigManager with path to optimization_config.json.

        Args:
            config_path: Path to optimization_config.json file
        """
        # Coerce to Path so callers may pass plain strings.
        self.config_path = Path(config_path)
        # The JSON schema ships next to this module under schemas/.
        self.schema_path = Path(__file__).parent / "schemas" / "optimization_config_schema.json"
        # Loaded lazily by load_config(); None until then.
        self.config: Optional[Dict[str, Any]] = None
        # Populated by validate(); empty means valid (or not yet validated).
        self.validation_errors: List[str] = []
|
||||
|
||||
def load_schema(self) -> Dict[str, Any]:
|
||||
"""Load JSON schema for validation."""
|
||||
if not self.schema_path.exists():
|
||||
raise FileNotFoundError(f"Schema file not found: {self.schema_path}")
|
||||
|
||||
with open(self.schema_path, 'r') as f:
|
||||
return json.load(f)
|
||||
|
||||
def load_config(self) -> Dict[str, Any]:
|
||||
"""Load configuration file."""
|
||||
if not self.config_path.exists():
|
||||
raise FileNotFoundError(f"Config file not found: {self.config_path}")
|
||||
|
||||
with open(self.config_path, 'r') as f:
|
||||
self.config = json.load(f)
|
||||
return self.config
|
||||
|
||||
def validate(self, strict: bool = True) -> bool:
|
||||
"""
|
||||
Validate configuration against schema.
|
||||
|
||||
Args:
|
||||
strict: If True, enforce all validations. If False, only warn on non-critical issues.
|
||||
|
||||
Returns:
|
||||
True if valid, False otherwise
|
||||
"""
|
||||
if self.config is None:
|
||||
self.load_config()
|
||||
|
||||
self.validation_errors = []
|
||||
|
||||
# JSON Schema validation
|
||||
if JSONSCHEMA_AVAILABLE:
|
||||
schema = self.load_schema()
|
||||
try:
|
||||
jsonschema.validate(instance=self.config, schema=schema)
|
||||
except jsonschema.ValidationError as e:
|
||||
self.validation_errors.append(f"Schema validation failed: {e.message}")
|
||||
if strict:
|
||||
return False
|
||||
else:
|
||||
self.validation_errors.append("jsonschema not installed - schema validation skipped")
|
||||
|
||||
# Custom validations
|
||||
self._validate_design_variable_bounds()
|
||||
self._validate_multi_objective_consistency()
|
||||
self._validate_file_locations()
|
||||
self._validate_extraction_consistency()
|
||||
|
||||
return len(self.validation_errors) == 0
|
||||
|
||||
def _validate_design_variable_bounds(self):
|
||||
"""Ensure bounds are valid (min < max)."""
|
||||
for dv in self.config.get("design_variables", []):
|
||||
bounds = dv.get("bounds", [])
|
||||
if len(bounds) == 2 and bounds[0] >= bounds[1]:
|
||||
self.validation_errors.append(
|
||||
f"Design variable '{dv.get('parameter', 'unknown')}': "
|
||||
f"min ({bounds[0]}) must be < max ({bounds[1]})"
|
||||
)
|
||||
|
||||
def _validate_multi_objective_consistency(self):
|
||||
"""Validate multi-objective settings consistency."""
|
||||
n_objectives = len(self.config.get("objectives", []))
|
||||
protocol = self.config.get("optimization_settings", {}).get("protocol")
|
||||
sampler = self.config.get("optimization_settings", {}).get("sampler")
|
||||
|
||||
if n_objectives > 1:
|
||||
# Multi-objective should use protocol_11 and NSGA-II
|
||||
if protocol and protocol != "protocol_11_multi_objective":
|
||||
self.validation_errors.append(
|
||||
f"Multi-objective optimization ({n_objectives} objectives) "
|
||||
f"should use protocol_11_multi_objective (got {protocol})"
|
||||
)
|
||||
if sampler and sampler != "NSGAIISampler":
|
||||
self.validation_errors.append(
|
||||
f"Multi-objective optimization should use NSGAIISampler (got {sampler})"
|
||||
)
|
||||
elif n_objectives == 1:
|
||||
# Single-objective should not use NSGA-II
|
||||
if sampler == "NSGAIISampler":
|
||||
self.validation_errors.append(
|
||||
"Single-objective optimization should not use NSGAIISampler "
|
||||
"(use TPESampler or CmaEsSampler)"
|
||||
)
|
||||
|
||||
def _validate_file_locations(self):
|
||||
"""Check if config is in correct location (1_setup/)."""
|
||||
if "1_setup" not in str(self.config_path.parent):
|
||||
self.validation_errors.append(
|
||||
f"Warning: Config should be in '1_setup/' directory, "
|
||||
f"found in {self.config_path.parent}"
|
||||
)
|
||||
|
||||
def _validate_extraction_consistency(self):
|
||||
"""Validate extraction specifications."""
|
||||
# Check objectives have extraction specs
|
||||
for obj in self.config.get("objectives", []):
|
||||
if "extraction" not in obj:
|
||||
self.validation_errors.append(
|
||||
f"Objective '{obj.get('name', 'unknown')}' missing extraction specification"
|
||||
)
|
||||
|
||||
# Check constraints have extraction specs
|
||||
for constraint in self.config.get("constraints", []):
|
||||
if "extraction" not in constraint:
|
||||
self.validation_errors.append(
|
||||
f"Constraint '{constraint.get('name', 'unknown')}' missing extraction specification"
|
||||
)
|
||||
|
||||
def get_validation_report(self) -> str:
|
||||
"""Get human-readable validation report."""
|
||||
if not self.validation_errors:
|
||||
return "[OK] Configuration is valid"
|
||||
|
||||
report = "[FAIL] Configuration validation failed:\n"
|
||||
for i, error in enumerate(self.validation_errors, 1):
|
||||
report += f" {i}. {error}\n"
|
||||
return report
|
||||
|
||||
# Type-safe accessor methods
|
||||
|
||||
def get_design_variables(self) -> List[Dict[str, Any]]:
|
||||
"""Get design variables with validated structure."""
|
||||
if self.config is None:
|
||||
self.load_config()
|
||||
return self.config.get("design_variables", [])
|
||||
|
||||
def get_objectives(self) -> List[Dict[str, Any]]:
|
||||
"""Get objectives with validated structure."""
|
||||
if self.config is None:
|
||||
self.load_config()
|
||||
return self.config.get("objectives", [])
|
||||
|
||||
def get_constraints(self) -> List[Dict[str, Any]]:
|
||||
"""Get constraints with validated structure."""
|
||||
if self.config is None:
|
||||
self.load_config()
|
||||
return self.config.get("constraints", [])
|
||||
|
||||
def get_simulation_settings(self) -> Dict[str, Any]:
|
||||
"""Get simulation settings."""
|
||||
if self.config is None:
|
||||
self.load_config()
|
||||
return self.config.get("simulation", {})
|
||||
|
||||
def get_optimization_settings(self) -> Dict[str, Any]:
|
||||
"""Get optimization settings."""
|
||||
if self.config is None:
|
||||
self.load_config()
|
||||
return self.config.get("optimization_settings", {})
|
||||
|
||||
|
||||
# CLI tool for validation: validates a single optimization_config.json and
# exits 0 on success, 1 on any load/validation failure (CI-friendly).
if __name__ == "__main__":
    import sys

    # Require exactly one argument: the config file to validate.
    if len(sys.argv) < 2:
        print("Usage: python config_manager.py <path_to_optimization_config.json>")
        print("\nExample:")
        print("  python config_manager.py studies/drone_gimbal_arm_optimization/1_setup/optimization_config.json")
        sys.exit(1)

    config_path = Path(sys.argv[1])

    print(f"Validating configuration: {config_path}")
    print("=" * 60)

    manager = ConfigManager(config_path)

    try:
        manager.load_config()
        print("[OK] Config loaded successfully")

        # validate() collects all errors; the report is printed either way.
        is_valid = manager.validate()
        print(manager.get_validation_report())

        if is_valid:
            print("\n" + "=" * 60)
            print("Configuration Summary:")
            print(f"  Study: {manager.config.get('study_name')}")
            print(f"  Protocol: {manager.get_optimization_settings().get('protocol')}")
            print(f"  Design Variables: {len(manager.get_design_variables())}")
            print(f"  Objectives: {len(manager.get_objectives())}")
            print(f"  Constraints: {len(manager.get_constraints())}")

        # Exit code mirrors the validation result.
        sys.exit(0 if is_valid else 1)
    except Exception as e:
        # Top-level boundary: report missing files / bad JSON as exit 1.
        print(f"[ERROR] {e}")
        sys.exit(1)
|
||||
575
optimization_engine/config/setup_wizard.py
Normal file
575
optimization_engine/config/setup_wizard.py
Normal file
@@ -0,0 +1,575 @@
|
||||
"""
|
||||
Optimization Setup Wizard - Phase 3.3
|
||||
|
||||
Interactive wizard that validates the complete optimization pipeline BEFORE running trials:
|
||||
1. Introspect NX model for available expressions
|
||||
2. Run baseline simulation to generate OP2
|
||||
3. Introspect OP2 file to detect element types and available results
|
||||
4. LLM-guided configuration based on actual model contents
|
||||
5. Dry-run pipeline validation with baseline OP2
|
||||
6. Report success/failure before starting optimization
|
||||
|
||||
This prevents wasted time running optimizations that will fail!
|
||||
|
||||
Author: Atomizer Development Team
|
||||
Version: 0.1.0 (Phase 3.3)
|
||||
Last Updated: 2025-01-16
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional, Tuple
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
|
||||
from optimization_engine.nx.updater import NXParameterUpdater
|
||||
from optimization_engine.nx.solver import NXSolver
|
||||
from optimization_engine.extractor_orchestrator import ExtractorOrchestrator
|
||||
from optimization_engine.inline_code_generator import InlineCodeGenerator
|
||||
from optimization_engine.plugins.hook_manager import HookManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class ModelIntrospection:
    """Results from NX model introspection (wizard STEP 1)."""
    # Maps expression name -> {'value': float, 'formula': str}.
    expressions: Dict[str, Any]
    # NX part and simulation files the expressions were read from.
    prt_file: Path
    sim_file: Path
|
||||
|
||||
@dataclass
class OP2Introspection:
    """Results from OP2 file introspection (wizard STEP 3)."""
    # Element types found with stress data, e.g. ['CHEXA', 'CPENTA', 'CTETRA'].
    element_types: List[str]
    # Result categories present, e.g. ['displacement', 'stress'].
    result_types: List[str]
    # Subcase IDs found in the displacement results, e.g. [1].
    subcases: List[int]
    node_count: int
    element_count: int
    # The OP2 file that was introspected.
    op2_file: Path
|
||||
|
||||
|
||||
@dataclass
class ValidationResult:
    """Result from validating one pipeline component (wizard STEP 5)."""
    success: bool
    # Which pipeline stage produced this result:
    # 'extractor', 'calculation', 'hook', or 'objective'.
    component: str
    # Human-readable [OK]/[FAIL] message for the summary log.
    message: str
    # Stage output on success (extracted values, objective, ...), else None.
    data: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class OptimizationSetupWizard:
    """
    Interactive wizard for validating optimization setup before running trials.

    This wizard prevents common mistakes by:
    - Checking model expressions exist
    - Validating OP2 file contains expected results
    - Testing extractors on real data
    - Confirming calculations work
    - Verifying complete pipeline before optimization
    """

    def __init__(self, prt_file: Path, sim_file: Path, output_dir: Optional[Path] = None):
        """
        Initialize optimization setup wizard.

        Args:
            prt_file: Path to NX part file (.prt)
            sim_file: Path to NX simulation file (.sim)
            output_dir: Directory for validation outputs (defaults to
                <cwd>/optimization_validation; created if missing)
        """
        self.prt_file = Path(prt_file)
        self.sim_file = Path(sim_file)

        if output_dir is None:
            output_dir = Path.cwd() / "optimization_validation"
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Populated progressively by the wizard steps below.
        self.model_info: Optional[ModelIntrospection] = None
        self.op2_info: Optional[OP2Introspection] = None
        self.baseline_op2: Optional[Path] = None

        logger.info("OptimizationSetupWizard initialized")
        logger.info(f"  Part: {self.prt_file}")
        logger.info(f"  Sim: {self.sim_file}")
        logger.info(f"  Output: {self.output_dir}")

    # =========================================================================
    # STEP 1: Model Introspection
    # =========================================================================

    def introspect_model(self) -> ModelIntrospection:
        """
        Introspect NX model to find available expressions.

        Returns:
            ModelIntrospection with all expressions found
        """
        logger.info("=" * 80)
        logger.info("STEP 1: Introspecting NX Model")
        logger.info("=" * 80)

        # Use NXParameterUpdater to read expressions
        updater = NXParameterUpdater(prt_file_path=self.prt_file)
        expressions = updater.get_all_expressions()

        logger.info(f"Found {len(expressions)} expressions in model:")
        for name, info in expressions.items():
            logger.info(f"  - {name}: {info.get('value')} ({info.get('formula', 'N/A')})")

        self.model_info = ModelIntrospection(
            expressions=expressions,
            prt_file=self.prt_file,
            sim_file=self.sim_file
        )

        return self.model_info

    # =========================================================================
    # STEP 2: Baseline Simulation
    # =========================================================================

    def run_baseline_simulation(self) -> Path:
        """
        Run baseline simulation with current expression values.

        This generates an OP2 file that we can introspect to see what
        element types and results are actually present.

        Returns:
            Path to generated OP2 file
        """
        logger.info("=" * 80)
        logger.info("STEP 2: Running Baseline Simulation")
        logger.info("=" * 80)
        logger.info("This generates OP2 file for introspection...")

        solver = NXSolver(nastran_version='2412', use_journal=True)
        result = solver.run_simulation(self.sim_file)

        self.baseline_op2 = result['op2_file']
        logger.info("Baseline simulation complete!")
        logger.info(f"  OP2 file: {self.baseline_op2}")

        return self.baseline_op2

    # =========================================================================
    # STEP 3: OP2 Introspection
    # =========================================================================

    def introspect_op2(self, op2_file: Optional[Path] = None) -> OP2Introspection:
        """
        Introspect OP2 file to detect element types and available results.

        Args:
            op2_file: Path to OP2 file (uses baseline if not provided)

        Returns:
            OP2Introspection with detected contents

        Raises:
            ValueError: If no OP2 file is available yet.
        """
        logger.info("=" * 80)
        logger.info("STEP 3: Introspecting OP2 File")
        logger.info("=" * 80)

        if op2_file is None:
            op2_file = self.baseline_op2

        if op2_file is None:
            raise ValueError("No OP2 file available. Run baseline simulation first.")

        # Use pyNastran to read OP2 and detect contents
        from pyNastran.op2.op2 import OP2

        model = OP2()
        model.read_op2(str(op2_file))

        # Detect element types with stress results.
        # In pyNastran, stress results are stored in model.op2_results.stress.
        element_types = []

        # Dynamically discover ALL element types with stress data from pyNastran
        # instead of hardcoding: introspect what pyNastran actually has.
        if hasattr(model, 'op2_results') and hasattr(model.op2_results, 'stress'):
            stress_obj = model.op2_results.stress

            # Find all attributes ending with '_stress' that have data
            for attr_name in dir(stress_obj):
                if attr_name.endswith('_stress') and not attr_name.startswith('_'):
                    # Check if this element type has data
                    element_data = getattr(stress_obj, attr_name, None)
                    if element_data:  # Has data
                        # Convert attribute name to element type,
                        # e.g. 'chexa_stress' -> 'CHEXA'
                        element_type = attr_name.replace('_stress', '').upper()

                        # Skip composite-element variants
                        if '_composite' not in attr_name:
                            element_types.append(element_type)

        # Also check for forces (stored differently in pyNastran):
        # bar/beam forces are at model level, not in the stress object.
        if hasattr(model, 'cbar_force') and model.cbar_force:
            element_types.append('CBAR')
        if hasattr(model, 'cbeam_force') and model.cbeam_force:
            element_types.append('CBEAM')
        if hasattr(model, 'crod_force') and model.crod_force:
            element_types.append('CROD')

        # Detect result types
        result_types = []
        if hasattr(model, 'displacements') and model.displacements:
            result_types.append('displacement')
        if element_types:  # Has stress
            result_types.append('stress')
        if hasattr(model, 'cbar_force') and model.cbar_force:
            result_types.append('force')

        # Get subcases (keyed off the displacement results)
        subcases = []
        if hasattr(model, 'displacements') and model.displacements:
            subcases = list(model.displacements.keys())

        # Get counts
        node_count = len(model.nodes) if hasattr(model, 'nodes') else 0
        element_count = len(model.elements) if hasattr(model, 'elements') else 0

        logger.info("OP2 Introspection Results:")
        logger.info(f"  Element types with stress: {element_types}")
        logger.info(f"  Result types available: {result_types}")
        logger.info(f"  Subcases: {subcases}")
        logger.info(f"  Nodes: {node_count}")
        logger.info(f"  Elements: {element_count}")

        self.op2_info = OP2Introspection(
            element_types=element_types,
            result_types=result_types,
            subcases=subcases,
            node_count=node_count,
            element_count=element_count,
            op2_file=op2_file
        )

        return self.op2_info

    # =========================================================================
    # STEP 4: LLM-Guided Configuration
    # =========================================================================

    def suggest_configuration(self, user_goal: str) -> Dict[str, Any]:
        """
        Use LLM to suggest configuration based on user goal and available data.

        This would analyze:
        - User's natural language description
        - Available expressions in model
        - Available element types in OP2
        - Available result types in OP2

        And propose a concrete configuration.

        Args:
            user_goal: User's description of optimization goal

        Returns:
            Suggested configuration dict

        Raises:
            ValueError: If the OP2 file has not been introspected yet.
        """
        logger.info("=" * 80)
        logger.info("STEP 4: LLM-Guided Configuration")
        logger.info("=" * 80)
        logger.info(f"User goal: {user_goal}")

        # TODO: Implement LLM analysis.
        # For now, return a manual suggestion based on OP2 contents.

        if self.op2_info is None:
            raise ValueError("OP2 not introspected. Run introspect_op2() first.")

        # Suggest extractors based on available result types
        engineering_features = []

        if 'displacement' in self.op2_info.result_types:
            engineering_features.append({
                'action': 'extract_displacement',
                'domain': 'result_extraction',
                'description': 'Extract displacement results from OP2 file',
                'params': {'result_type': 'displacement'}
            })

        if 'stress' in self.op2_info.result_types and self.op2_info.element_types:
            # Use first available element type
            element_type = self.op2_info.element_types[0].lower()
            engineering_features.append({
                'action': 'extract_solid_stress',
                'domain': 'result_extraction',
                'description': f'Extract stress from {element_type.upper()} elements',
                'params': {
                    'result_type': 'stress',
                    'element_type': element_type
                }
            })

        logger.info("Suggested configuration:")
        logger.info(f"  Engineering features: {len(engineering_features)}")
        for feat in engineering_features:
            logger.info(f"    - {feat['action']}: {feat['description']}")

        return {
            'engineering_features': engineering_features,
            'inline_calculations': [],
            'post_processing_hooks': []
        }

    # =========================================================================
    # STEP 5: Pipeline Validation (Dry Run)
    # =========================================================================

    def validate_pipeline(self, llm_workflow: Dict[str, Any]) -> List[ValidationResult]:
        """
        Validate complete pipeline with baseline OP2 file.

        This executes the entire extraction/calculation/hook pipeline
        using the baseline OP2 to ensure everything works BEFORE
        starting the optimization.

        Args:
            llm_workflow: Complete LLM workflow configuration

        Returns:
            List of ValidationResult objects

        Raises:
            ValueError: If no baseline OP2 file has been generated yet.
        """
        logger.info("=" * 80)
        logger.info("STEP 5: Pipeline Validation (Dry Run)")
        logger.info("=" * 80)

        if self.baseline_op2 is None:
            raise ValueError("No baseline OP2 file. Run baseline simulation first.")

        results = []

        # Validate extractors
        logger.info("\nValidating extractors...")
        orchestrator = ExtractorOrchestrator(
            extractors_dir=self.output_dir / "generated_extractors"
        )

        extractors = orchestrator.process_llm_workflow(llm_workflow)
        extraction_results = {}

        for extractor in extractors:
            try:
                # Pass extractor params (like element_type) to execution
                result = orchestrator.execute_extractor(
                    extractor.name,
                    self.baseline_op2,
                    subcase=1,
                    **extractor.params  # Pass params from workflow (element_type, etc.)
                )
                extraction_results.update(result)

                results.append(ValidationResult(
                    success=True,
                    component='extractor',
                    message=f"[OK] {extractor.name}: {list(result.keys())}",
                    data=result
                ))
                logger.info(f"  [OK] {extractor.name}: {list(result.keys())}")

            except Exception as e:
                results.append(ValidationResult(
                    success=False,
                    component='extractor',
                    message=f"[FAIL] {extractor.name}: {str(e)}",
                    data=None
                ))
                logger.error(f"  [FAIL] {extractor.name}: {str(e)}")

        # Validate inline calculations
        logger.info("\nValidating inline calculations...")
        inline_generator = InlineCodeGenerator()
        calculations = {}
        calc_namespace = {**extraction_results, **calculations}

        for calc_spec in llm_workflow.get('inline_calculations', []):
            try:
                generated = inline_generator.generate_from_llm_output(calc_spec)
                # NOTE: exec of generated code -- the workflow is assumed to
                # come from a trusted LLM pipeline, not untrusted user input.
                exec(generated.code, calc_namespace)

                # Extract newly created variables
                for key, value in calc_namespace.items():
                    if key not in extraction_results and not key.startswith('_'):
                        calculations[key] = value

                results.append(ValidationResult(
                    success=True,
                    component='calculation',
                    message=f"[OK] {calc_spec.get('action', 'calculation')}: Created {list(calculations.keys())}",
                    data=calculations
                ))
                logger.info(f"  [OK] {calc_spec.get('action', 'calculation')}")

            except Exception as e:
                results.append(ValidationResult(
                    success=False,
                    component='calculation',
                    message=f"[FAIL] {calc_spec.get('action', 'calculation')}: {str(e)}",
                    data=None
                ))
                logger.error(f"  [FAIL] {calc_spec.get('action', 'calculation')}: {str(e)}")

        # Validate hooks
        logger.info("\nValidating hooks...")
        hook_manager = HookManager()

        # Load system hooks from the package-level plugins/ directory.
        # BUGFIX: this module lives in optimization_engine/config/, so the
        # plugins package is one level up (parent.parent). Using
        # Path(__file__).parent / 'plugins' pointed at the nonexistent
        # config/plugins and silently loaded no hooks at all.
        system_hooks_dir = Path(__file__).parent.parent / 'plugins'
        if system_hooks_dir.exists():
            hook_manager.load_plugins_from_directory(system_hooks_dir)

        hook_results = hook_manager.execute_hooks('post_calculation', {
            'trial_number': 0,
            'design_variables': {},
            'results': extraction_results,
            'calculations': calculations
        })

        if hook_results:
            results.append(ValidationResult(
                success=True,
                component='hook',
                message=f"[OK] Hooks executed: {len(hook_results)} results",
                data={'hook_results': hook_results}
            ))
            logger.info(f"  [OK] Executed {len(hook_results)} hook(s)")

        # Check for objective
        logger.info("\nValidating objective...")
        objective = None

        for hook_result in hook_results:
            if hook_result and 'objective' in hook_result:
                objective = hook_result['objective']
                break

        if objective is None:
            # Try to find objective in calculations or results
            for key in ['max_displacement', 'max_stress', 'max_von_mises']:
                if key in {**extraction_results, **calculations}:
                    objective = {**extraction_results, **calculations}[key]
                    logger.warning(f"  [WARNING] No explicit objective, using: {key}")
                    break

        if objective is not None:
            results.append(ValidationResult(
                success=True,
                component='objective',
                message=f"[OK] Objective value: {objective}",
                data={'objective': objective}
            ))
            logger.info(f"  [OK] Objective value: {objective}")
        else:
            results.append(ValidationResult(
                success=False,
                component='objective',
                message="[FAIL] Could not determine objective value",
                data=None
            ))
            logger.error("  [FAIL] Could not determine objective value")

        return results

    # =========================================================================
    # Complete Validation Workflow
    # =========================================================================

    def run_complete_validation(self, user_goal: str, llm_workflow: Optional[Dict[str, Any]] = None) -> Tuple[bool, List[ValidationResult]]:
        """
        Run complete validation workflow from start to finish.

        Steps:
        1. Introspect model for expressions
        2. Run baseline simulation
        3. Introspect OP2 for contents
        4. Suggest/validate configuration
        5. Dry-run pipeline validation

        Args:
            user_goal: User's description of optimization goal
            llm_workflow: Optional pre-configured workflow (otherwise suggested)

        Returns:
            Tuple of (success: bool, results: List[ValidationResult])
        """
        logger.info("=" * 80)
        logger.info("OPTIMIZATION SETUP WIZARD - COMPLETE VALIDATION")
        logger.info("=" * 80)

        # Step 1: Introspect model
        self.introspect_model()

        # Step 2: Run baseline
        self.run_baseline_simulation()

        # Step 3: Introspect OP2
        self.introspect_op2()

        # Step 4: Get configuration
        if llm_workflow is None:
            llm_workflow = self.suggest_configuration(user_goal)

        # Step 5: Validate pipeline
        validation_results = self.validate_pipeline(llm_workflow)

        # Check if all validations passed
        all_passed = all(r.success for r in validation_results)

        logger.info("=" * 80)
        logger.info("VALIDATION SUMMARY")
        logger.info("=" * 80)

        for result in validation_results:
            logger.info(result.message)

        if all_passed:
            logger.info("\n[OK] ALL VALIDATIONS PASSED - Ready for optimization!")
        else:
            logger.error("\n[FAIL] VALIDATION FAILED - Fix issues before optimization")

        return all_passed, validation_results
|
||||
|
||||
|
||||
def main():
    """Smoke-test the optimization setup wizard against the bundled Bracket model."""
    import sys

    print("=" * 80)
    print("Phase 3.3: Optimization Setup Wizard Test")
    print("=" * 80)
    print()

    # Configuration: expects the Bracket test model files to be present.
    prt_file = Path("tests/Bracket.prt")
    sim_file = Path("tests/Bracket_sim1.sim")

    if not prt_file.exists() or not sim_file.exists():
        print("ERROR: Test files not found")
        sys.exit(1)

    # Initialize wizard
    wizard = OptimizationSetupWizard(prt_file, sim_file)

    # Run complete validation (model -> baseline -> OP2 -> pipeline dry run)
    user_goal = "Maximize displacement while keeping stress below yield/4"

    success, results = wizard.run_complete_validation(user_goal)

    if success:
        print("\n[OK] Pipeline validated! Ready to start optimization.")
    else:
        # Non-zero exit so calling scripts can detect the failure.
        print("\n[FAIL] Validation failed. Review errors above.")
        sys.exit(1)


if __name__ == '__main__':
    main()
|
||||
383
optimization_engine/config/template_loader.py
Normal file
383
optimization_engine/config/template_loader.py
Normal file
@@ -0,0 +1,383 @@
|
||||
"""
|
||||
Template Loader for Atomizer Optimization Studies
|
||||
|
||||
Creates new studies from templates with automatic folder structure creation.
|
||||
|
||||
Usage:
|
||||
from optimization_engine.config.template_loader import create_study_from_template, list_templates
|
||||
|
||||
# List available templates
|
||||
templates = list_templates()
|
||||
|
||||
# Create a new study from template
|
||||
create_study_from_template(
|
||||
template_name="beam_stiffness_optimization",
|
||||
study_name="my_beam_study"
|
||||
)
|
||||
"""
|
||||
|
||||
import json
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
# Package-relative locations: this module lives in optimization_engine/config/,
# so templates/ and studies/ sit one level up at the package root.
TEMPLATES_DIR = Path(__file__).parent.parent / "templates"
STUDIES_DIR = Path(__file__).parent.parent / "studies"
|
||||
|
||||
|
||||
def list_templates() -> List[Dict[str, Any]]:
    """
    List all available templates.

    Scans TEMPLATES_DIR for ``*.json`` files and summarizes each one;
    unreadable templates are reported with a warning and skipped.

    Returns:
        List of template metadata dictionaries
    """
    if not TEMPLATES_DIR.exists():
        return []

    found: List[Dict[str, Any]] = []
    for path in TEMPLATES_DIR.glob("*.json"):
        try:
            with open(path, 'r') as f:
                config = json.load(f)

            info = config.get("template_info", {})
            found.append({
                "name": path.stem,
                "description": config.get("description", "No description"),
                "category": info.get("category", "general"),
                "analysis_type": info.get("analysis_type", "unknown"),
                "objectives": len(config.get("objectives", [])),
                "design_variables": len(config.get("design_variables", [])),
                "path": str(path),
            })
        except Exception as e:
            # Skip malformed templates rather than failing the whole listing.
            print(f"Warning: Could not load template {path}: {e}")

    return found
|
||||
|
||||
|
||||
def get_template(template_name: str) -> Optional[Dict[str, Any]]:
    """
    Load a template by name.

    Args:
        template_name: Name of the template (with or without the .json
            extension)

    Returns:
        Template configuration dictionary or None if not found
    """
    # Accept either the bare name or a name already carrying .json.
    candidates = (
        TEMPLATES_DIR / f"{template_name}.json",
        TEMPLATES_DIR / template_name,
    )
    for candidate in candidates:
        if candidate.exists():
            with open(candidate, 'r') as f:
                return json.load(f)
    return None
|
||||
|
||||
|
||||
def create_study_from_template(
    template_name: str,
    study_name: str,
    studies_dir: Optional[Path] = None,
    overrides: Optional[Dict[str, Any]] = None
) -> Path:
    """
    Create a new study from a template.

    Builds the standard study layout (``1_setup/model`` and ``2_results``),
    writes a customized ``optimization_config.json``, and generates a
    ``run_optimization.py`` script and a ``README.md`` for the new study.

    Args:
        template_name: Name of the template to use
        study_name: Name for the new study
        studies_dir: Base directory for studies (default: studies/)
        overrides: Dictionary of config values to override (deep-merged)

    Returns:
        Path to the created study directory

    Raises:
        FileNotFoundError: If template doesn't exist
        FileExistsError: If study already exists
    """
    import copy

    if studies_dir is None:
        studies_dir = STUDIES_DIR

    studies_dir = Path(studies_dir)

    # Load template
    template = get_template(template_name)
    if template is None:
        available = [t["name"] for t in list_templates()]
        raise FileNotFoundError(
            f"Template '{template_name}' not found. "
            f"Available templates: {available}"
        )

    # Check if study already exists (refuse to clobber an existing study)
    study_path = studies_dir / study_name
    if study_path.exists():
        raise FileExistsError(
            f"Study '{study_name}' already exists at {study_path}. "
            "Choose a different name or delete the existing study."
        )

    # Create study directory structure
    setup_dir = study_path / "1_setup"
    model_dir = setup_dir / "model"
    results_dir = study_path / "2_results"

    setup_dir.mkdir(parents=True)
    model_dir.mkdir()
    results_dir.mkdir()

    # Customize template for this study. Use a deep copy so that mutating
    # nested dicts below (e.g. training_data_export) or applying nested
    # overrides via _deep_update can never alias/mutate the loaded template
    # object itself (template.copy() was only a shallow copy).
    config = copy.deepcopy(template)
    config["study_name"] = study_name
    config["created_from_template"] = template_name
    config["created_at"] = datetime.now().isoformat()

    # Expand the ${study_name} placeholder in the training data export path
    if "training_data_export" in config:
        export_dir = config["training_data_export"].get("export_dir", "")
        if "${study_name}" in export_dir:
            config["training_data_export"]["export_dir"] = export_dir.replace(
                "${study_name}", study_name
            )

    # Apply caller overrides last so they win over template defaults
    if overrides:
        _deep_update(config, overrides)

    # Write configuration
    config_path = setup_dir / "optimization_config.json"
    with open(config_path, 'w') as f:
        json.dump(config, f, indent=2)

    # Create run_optimization.py
    run_script_content = _generate_run_script(study_name, config)
    run_script_path = study_path / "run_optimization.py"
    with open(run_script_path, 'w') as f:
        f.write(run_script_content)

    # Create README.md
    readme_content = _generate_study_readme(study_name, config, template_name)
    readme_path = study_path / "README.md"
    with open(readme_path, 'w') as f:
        f.write(readme_content)

    print(f"Created study '{study_name}' from template '{template_name}'")
    print(f"  Location: {study_path}")
    print(f"  Config: {config_path}")
    print("\nNext steps:")
    print(f"  1. Add your NX model files to: {model_dir}")
    print("  2. Update design variable bounds in optimization_config.json")
    print(f"  3. Run: python {run_script_path} --trials 50")

    return study_path
|
||||
|
||||
|
||||
def _deep_update(base: Dict, updates: Dict) -> Dict:
|
||||
"""Recursively update a dictionary."""
|
||||
for key, value in updates.items():
|
||||
if key in base and isinstance(base[key], dict) and isinstance(value, dict):
|
||||
_deep_update(base[key], value)
|
||||
else:
|
||||
base[key] = value
|
||||
return base
|
||||
|
||||
|
||||
def _generate_run_script(study_name: str, config: Dict[str, Any]) -> str:
    """Generate the contents of a study's ``run_optimization.py`` script.

    The returned text is a complete, standalone Python script that adds the
    project root to ``sys.path``, parses CLI flags (``--trials``,
    ``--resume``, ``--enable-nn``, ``--validate-only``) and dispatches to
    ``optimization_engine.study_runner.run_study``.

    Args:
        study_name: Study name baked into the generated script.
        config: Study configuration; ``description``, ``created_from_template``
            and ``created_at`` are interpolated into the script header.

    Returns:
        The generated script as a single string.
    """
    # NOTE: only the {...} expressions are evaluated *now*, at generation
    # time; everything else in the triple-quoted f-string (including the
    # inner docstring and the __main__ guard) is emitted verbatim into the
    # generated file.
    # NOTE(review): if config['description'] ever contains a double quote,
    # the generated argparse line would be syntactically broken — assumes
    # descriptions are plain text; TODO confirm upstream sanitization.
    return f'''"""
Optimization Runner for {study_name}

Auto-generated from template: {config.get('created_from_template', 'unknown')}
Created: {config.get('created_at', 'unknown')}

Usage:
    python run_optimization.py --trials 50
    python run_optimization.py --trials 25 --resume
    python run_optimization.py --trials 100 --enable-nn
"""

import sys
import argparse
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

from optimization_engine.study_runner import run_study


def main():
    parser = argparse.ArgumentParser(description="{config.get('description', study_name)}")
    parser.add_argument('--trials', type=int, default=30, help='Number of trials to run')
    parser.add_argument('--resume', action='store_true', help='Resume existing study')
    parser.add_argument('--enable-nn', action='store_true', help='Enable neural network acceleration')
    parser.add_argument('--validate-only', action='store_true', help='Only validate setup, do not run')

    args = parser.parse_args()

    study_dir = Path(__file__).parent
    config_path = study_dir / "1_setup" / "optimization_config.json"

    if args.validate_only:
        from optimization_engine.validators import validate_study
        result = validate_study("{study_name}")
        print(result)
        return

    run_study(
        config_path=config_path,
        n_trials=args.trials,
        resume=args.resume,
        enable_neural=args.enable_nn
    )


if __name__ == "__main__":
    main()
'''
|
||||
|
||||
|
||||
def _generate_study_readme(study_name: str, config: Dict[str, Any], template_name: str) -> str:
    """Generate a ``README.md`` for a newly created study.

    Renders the study's objectives, design variables, and constraints as
    markdown bullet lists, followed by static setup/run/visualization
    instructions.

    Args:
        study_name: Study name used in the README title and commands.
        config: Study configuration; ``objectives``, ``design_variables``,
            ``constraints``, ``description``, ``created_at`` and
            ``engineering_context`` are read (all with fallbacks).
        template_name: Name of the template the study was created from.

    Returns:
        The README contents as a single markdown string.
    """
    objectives = config.get("objectives", [])
    design_vars = config.get("design_variables", [])
    constraints = config.get("constraints", [])

    # Pre-render one markdown bullet per entry; every field has a fallback
    # so partially-specified templates still produce a valid README.
    obj_list = "\n".join([f"- **{o.get('name', 'unnamed')}**: {o.get('goal', 'minimize')} - {o.get('description', '')}" for o in objectives])
    # Design variables: assumes 'bounds' is a 2-element [lo, hi] list — TODO confirm schema.
    dv_list = "\n".join([f"- **{d.get('parameter', 'unnamed')}**: [{d.get('bounds', [0, 1])[0]}, {d.get('bounds', [0, 1])[1]}] - {d.get('description', '')}" for d in design_vars])
    const_list = "\n".join([f"- **{c.get('name', 'unnamed')}**: {c.get('type', 'less_than')} {c.get('threshold', 0)} - {c.get('description', '')}" for c in constraints])

    # Only the {...} expressions are interpolated; the rest of the template
    # is emitted verbatim as markdown.
    return f'''# {study_name}

{config.get('description', 'Optimization study')}

**Template**: {template_name}
**Created**: {config.get('created_at', 'unknown')}

## Engineering Context

{config.get('engineering_context', 'No context provided')}

## Objectives

{obj_list if obj_list else 'None defined'}

## Design Variables

{dv_list if dv_list else 'None defined'}

## Constraints

{const_list if const_list else 'None defined'}

## Setup Instructions

1. **Add NX Model Files**

   Copy your NX part (.prt), simulation (.sim), and FEM (.fem) files to:
   ```
   1_setup/model/
   ```

2. **Configure Design Variables**

   Edit `1_setup/optimization_config.json`:
   - Ensure `design_variables[].parameter` matches your NX expression names
   - Adjust bounds to your design space

3. **Validate Setup**

   ```bash
   python run_optimization.py --validate-only
   ```

## Running the Optimization

### Basic Run
```bash
python run_optimization.py --trials 50
```

### Resume Interrupted Run
```bash
python run_optimization.py --trials 25 --resume
```

### With Neural Network Acceleration
```bash
python run_optimization.py --trials 100 --enable-nn
```

## Results

After optimization, results are saved in `2_results/`:
- `study.db` - Optuna database with all trials
- `history.json` - Trial history
- `optimization_summary.json` - Summary with best parameters

## Visualization

View results with Optuna Dashboard:
```bash
optuna-dashboard sqlite:///2_results/study.db
```

Or generate a report:
```bash
python -m optimization_engine.generate_report {study_name}
```
'''
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: `python template_loader.py list` to enumerate
    # templates, `... create -t NAME -n STUDY` to instantiate one.
    import argparse
    # Import locally so sys.exit below works even if the module header
    # does not import sys (redundant but harmless if it does).
    import sys

    parser = argparse.ArgumentParser(description="Atomizer Template Loader")
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # List templates (takes no extra arguments, so no handle is kept)
    subparsers.add_parser("list", help="List available templates")

    # Create study from a template
    create_parser = subparsers.add_parser("create", help="Create study from template")
    create_parser.add_argument("--template", "-t", required=True, help="Template name")
    create_parser.add_argument("--name", "-n", required=True, help="Study name")

    args = parser.parse_args()

    if args.command == "list":
        templates = list_templates()
        if not templates:
            print("No templates found in templates/")
        else:
            print("Available templates:")
            print("-" * 60)
            for t in templates:
                print(f"  {t['name']}")
                print(f"    {t['description']}")
                print(f"    Category: {t['category']} | Analysis: {t['analysis_type']}")
                print(f"    Design vars: {t['design_variables']} | Objectives: {t['objectives']}")
                print()

    elif args.command == "create":
        try:
            # create_study_from_template prints its own success summary;
            # the returned path is not needed here.
            create_study_from_template(
                template_name=args.template,
                study_name=args.name
            )
        except (FileNotFoundError, FileExistsError) as e:
            print(f"Error: {e}")
            sys.exit(1)

    else:
        # No (or unknown) subcommand: show usage rather than doing nothing
        parser.print_help()
|
||||
Reference in New Issue
Block a user