576 lines
20 KiB
Python
576 lines
20 KiB
Python
|
|
"""
|
||
|
|
Optimization Setup Wizard - Phase 3.3
|
||
|
|
|
||
|
|
Interactive wizard that validates the complete optimization pipeline BEFORE running trials:
|
||
|
|
1. Introspect NX model for available expressions
|
||
|
|
2. Run baseline simulation to generate OP2
|
||
|
|
3. Introspect OP2 file to detect element types and available results
|
||
|
|
4. LLM-guided configuration based on actual model contents
|
||
|
|
5. Dry-run pipeline validation with baseline OP2
|
||
|
|
6. Report success/failure before starting optimization
|
||
|
|
|
||
|
|
This prevents wasted time running optimizations that will fail!
|
||
|
|
|
||
|
|
Author: Atomizer Development Team
|
||
|
|
Version: 0.1.0 (Phase 3.3)
|
||
|
|
Last Updated: 2025-01-16
|
||
|
|
"""
|
||
|
|
|
||
|
|
from pathlib import Path
|
||
|
|
from typing import Dict, Any, List, Optional, Tuple
|
||
|
|
import logging
|
||
|
|
from dataclasses import dataclass
|
||
|
|
|
||
|
|
from optimization_engine.nx_updater import NXParameterUpdater
|
||
|
|
from optimization_engine.nx_solver import NXSolver
|
||
|
|
from optimization_engine.extractor_orchestrator import ExtractorOrchestrator
|
||
|
|
from optimization_engine.inline_code_generator import InlineCodeGenerator
|
||
|
|
from optimization_engine.plugins.hook_manager import HookManager
|
||
|
|
|
||
|
|
logger = logging.getLogger(__name__)
|
||
|
|
|
||
|
|
|
||
|
|
@dataclass
class ModelIntrospection:
    """Results from NX model introspection.

    Captures the expressions discovered in the NX part file, together with
    the source file paths, so later wizard steps can reference them.
    """
    # Mapping of expression name -> info dict as returned by
    # NXParameterUpdater.get_all_expressions()
    expressions: Dict[str, Any]  # {name: {'value': float, 'formula': str}}
    # Path to the NX part file (.prt) that was introspected
    prt_file: Path
    # Path to the companion NX simulation file (.sim)
    sim_file: Path
|
||
|
|
|
||
|
|
|
||
|
|
@dataclass
class OP2Introspection:
    """Results from OP2 file introspection.

    Summarizes what the baseline OP2 actually contains so the wizard can
    suggest a configuration that matches the model.
    """
    # Element type names with stress data present
    element_types: List[str]  # e.g., ['CHEXA', 'CPENTA', 'CTETRA']
    # High-level result categories detected in the file
    result_types: List[str]  # e.g., ['displacement', 'stress']
    # Subcase IDs found (taken from the displacement results)
    subcases: List[int]  # e.g., [1]
    # Node/element counts; 0 when the OP2 carries no geometry tables
    node_count: int
    element_count: int
    # The OP2 file that was introspected
    op2_file: Path
|
||
|
|
|
||
|
|
|
||
|
|
@dataclass
class ValidationResult:
    """Result from pipeline validation.

    One record per validated pipeline component; a failed record carries the
    error message and no data.
    """
    # True when the component validated cleanly
    success: bool
    # Which pipeline stage produced this result
    component: str  # 'extractor', 'calculation', 'hook', 'objective'
    # Human-readable "[OK]"/"[FAIL]" summary line
    message: str
    # Component-specific payload (extracted values, hook output, ...)
    data: Optional[Dict[str, Any]] = None
|
||
|
|
|
||
|
|
|
||
|
|
class OptimizationSetupWizard:
    """
    Interactive wizard for validating optimization setup before running trials.

    This wizard prevents common mistakes by:
    - Checking model expressions exist
    - Validating OP2 file contains expected results
    - Testing extractors on real data
    - Confirming calculations work
    - Verifying complete pipeline before optimization
    """

    def __init__(self, prt_file: Path, sim_file: Path, output_dir: Optional[Path] = None):
        """
        Initialize optimization setup wizard.

        Args:
            prt_file: Path to NX part file (.prt)
            sim_file: Path to NX simulation file (.sim)
            output_dir: Directory for validation outputs. Defaults to
                ``<cwd>/optimization_validation``; created if missing.
        """
        self.prt_file = Path(prt_file)
        self.sim_file = Path(sim_file)

        if output_dir is None:
            output_dir = Path.cwd() / "optimization_validation"
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Populated by the individual wizard steps (introspect_model,
        # run_baseline_simulation, introspect_op2).
        self.model_info: Optional[ModelIntrospection] = None
        self.op2_info: Optional[OP2Introspection] = None
        self.baseline_op2: Optional[Path] = None

        # Lazy %-style args: formatting is skipped when INFO is disabled.
        logger.info("OptimizationSetupWizard initialized")
        logger.info(" Part: %s", self.prt_file)
        logger.info(" Sim: %s", self.sim_file)
        logger.info(" Output: %s", self.output_dir)
|
||
|
|
|
||
|
|
# =========================================================================
|
||
|
|
# STEP 1: Model Introspection
|
||
|
|
# =========================================================================
|
||
|
|
|
||
|
|
def introspect_model(self) -> ModelIntrospection:
|
||
|
|
"""
|
||
|
|
Introspect NX model to find available expressions.
|
||
|
|
|
||
|
|
Returns:
|
||
|
|
ModelIntrospection with all expressions found
|
||
|
|
"""
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info("STEP 1: Introspecting NX Model")
|
||
|
|
logger.info("=" * 80)
|
||
|
|
|
||
|
|
# Use NXParameterUpdater to read expressions
|
||
|
|
updater = NXParameterUpdater(prt_file_path=self.prt_file)
|
||
|
|
expressions = updater.get_all_expressions()
|
||
|
|
|
||
|
|
logger.info(f"Found {len(expressions)} expressions in model:")
|
||
|
|
for name, info in expressions.items():
|
||
|
|
logger.info(f" - {name}: {info.get('value')} ({info.get('formula', 'N/A')})")
|
||
|
|
|
||
|
|
self.model_info = ModelIntrospection(
|
||
|
|
expressions=expressions,
|
||
|
|
prt_file=self.prt_file,
|
||
|
|
sim_file=self.sim_file
|
||
|
|
)
|
||
|
|
|
||
|
|
return self.model_info
|
||
|
|
|
||
|
|
# =========================================================================
|
||
|
|
# STEP 2: Baseline Simulation
|
||
|
|
# =========================================================================
|
||
|
|
|
||
|
|
def run_baseline_simulation(self) -> Path:
|
||
|
|
"""
|
||
|
|
Run baseline simulation with current expression values.
|
||
|
|
|
||
|
|
This generates an OP2 file that we can introspect to see what
|
||
|
|
element types and results are actually present.
|
||
|
|
|
||
|
|
Returns:
|
||
|
|
Path to generated OP2 file
|
||
|
|
"""
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info("STEP 2: Running Baseline Simulation")
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info("This generates OP2 file for introspection...")
|
||
|
|
|
||
|
|
solver = NXSolver(nastran_version='2412', use_journal=True)
|
||
|
|
result = solver.run_simulation(self.sim_file)
|
||
|
|
|
||
|
|
self.baseline_op2 = result['op2_file']
|
||
|
|
logger.info(f"Baseline simulation complete!")
|
||
|
|
logger.info(f" OP2 file: {self.baseline_op2}")
|
||
|
|
|
||
|
|
return self.baseline_op2
|
||
|
|
|
||
|
|
# =========================================================================
|
||
|
|
# STEP 3: OP2 Introspection
|
||
|
|
# =========================================================================
|
||
|
|
|
||
|
|
def introspect_op2(self, op2_file: Optional[Path] = None) -> OP2Introspection:
|
||
|
|
"""
|
||
|
|
Introspect OP2 file to detect element types and available results.
|
||
|
|
|
||
|
|
Args:
|
||
|
|
op2_file: Path to OP2 file (uses baseline if not provided)
|
||
|
|
|
||
|
|
Returns:
|
||
|
|
OP2Introspection with detected contents
|
||
|
|
"""
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info("STEP 3: Introspecting OP2 File")
|
||
|
|
logger.info("=" * 80)
|
||
|
|
|
||
|
|
if op2_file is None:
|
||
|
|
op2_file = self.baseline_op2
|
||
|
|
|
||
|
|
if op2_file is None:
|
||
|
|
raise ValueError("No OP2 file available. Run baseline simulation first.")
|
||
|
|
|
||
|
|
# Use pyNastran to read OP2 and detect contents
|
||
|
|
from pyNastran.op2.op2 import OP2
|
||
|
|
|
||
|
|
model = OP2()
|
||
|
|
model.read_op2(str(op2_file))
|
||
|
|
|
||
|
|
# Detect element types with stress results
|
||
|
|
# In pyNastran, stress results are stored in model.op2_results.stress
|
||
|
|
element_types = []
|
||
|
|
|
||
|
|
# Dynamically discover ALL element types with stress data from pyNastran
|
||
|
|
# Instead of hardcoding, we introspect what pyNastran actually has!
|
||
|
|
if hasattr(model, 'op2_results') and hasattr(model.op2_results, 'stress'):
|
||
|
|
stress_obj = model.op2_results.stress
|
||
|
|
|
||
|
|
# Find all attributes ending with '_stress' that have data
|
||
|
|
for attr_name in dir(stress_obj):
|
||
|
|
if attr_name.endswith('_stress') and not attr_name.startswith('_'):
|
||
|
|
# Check if this element type has data
|
||
|
|
element_data = getattr(stress_obj, attr_name, None)
|
||
|
|
if element_data: # Has data
|
||
|
|
# Convert attribute name to element type
|
||
|
|
# e.g., 'chexa_stress' -> 'CHEXA'
|
||
|
|
element_type = attr_name.replace('_stress', '').upper()
|
||
|
|
|
||
|
|
# Handle special cases (composite elements)
|
||
|
|
if '_composite' not in attr_name:
|
||
|
|
element_types.append(element_type)
|
||
|
|
|
||
|
|
# Also check for forces (stored differently in pyNastran)
|
||
|
|
# Bar/beam forces are at model level, not in stress object
|
||
|
|
if hasattr(model, 'cbar_force') and model.cbar_force:
|
||
|
|
element_types.append('CBAR')
|
||
|
|
if hasattr(model, 'cbeam_force') and model.cbeam_force:
|
||
|
|
element_types.append('CBEAM')
|
||
|
|
if hasattr(model, 'crod_force') and model.crod_force:
|
||
|
|
element_types.append('CROD')
|
||
|
|
|
||
|
|
# Detect result types
|
||
|
|
result_types = []
|
||
|
|
if hasattr(model, 'displacements') and model.displacements:
|
||
|
|
result_types.append('displacement')
|
||
|
|
if element_types: # Has stress
|
||
|
|
result_types.append('stress')
|
||
|
|
if hasattr(model, 'cbar_force') and model.cbar_force:
|
||
|
|
result_types.append('force')
|
||
|
|
|
||
|
|
# Get subcases
|
||
|
|
subcases = []
|
||
|
|
if hasattr(model, 'displacements') and model.displacements:
|
||
|
|
subcases = list(model.displacements.keys())
|
||
|
|
|
||
|
|
# Get counts
|
||
|
|
node_count = len(model.nodes) if hasattr(model, 'nodes') else 0
|
||
|
|
element_count = len(model.elements) if hasattr(model, 'elements') else 0
|
||
|
|
|
||
|
|
logger.info(f"OP2 Introspection Results:")
|
||
|
|
logger.info(f" Element types with stress: {element_types}")
|
||
|
|
logger.info(f" Result types available: {result_types}")
|
||
|
|
logger.info(f" Subcases: {subcases}")
|
||
|
|
logger.info(f" Nodes: {node_count}")
|
||
|
|
logger.info(f" Elements: {element_count}")
|
||
|
|
|
||
|
|
self.op2_info = OP2Introspection(
|
||
|
|
element_types=element_types,
|
||
|
|
result_types=result_types,
|
||
|
|
subcases=subcases,
|
||
|
|
node_count=node_count,
|
||
|
|
element_count=element_count,
|
||
|
|
op2_file=op2_file
|
||
|
|
)
|
||
|
|
|
||
|
|
return self.op2_info
|
||
|
|
|
||
|
|
# =========================================================================
|
||
|
|
# STEP 4: LLM-Guided Configuration
|
||
|
|
# =========================================================================
|
||
|
|
|
||
|
|
def suggest_configuration(self, user_goal: str) -> Dict[str, Any]:
|
||
|
|
"""
|
||
|
|
Use LLM to suggest configuration based on user goal and available data.
|
||
|
|
|
||
|
|
This would analyze:
|
||
|
|
- User's natural language description
|
||
|
|
- Available expressions in model
|
||
|
|
- Available element types in OP2
|
||
|
|
- Available result types in OP2
|
||
|
|
|
||
|
|
And propose a concrete configuration.
|
||
|
|
|
||
|
|
Args:
|
||
|
|
user_goal: User's description of optimization goal
|
||
|
|
|
||
|
|
Returns:
|
||
|
|
Suggested configuration dict
|
||
|
|
"""
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info("STEP 4: LLM-Guided Configuration")
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info(f"User goal: {user_goal}")
|
||
|
|
|
||
|
|
# TODO: Implement LLM analysis
|
||
|
|
# For now, return a manual suggestion based on OP2 contents
|
||
|
|
|
||
|
|
if self.op2_info is None:
|
||
|
|
raise ValueError("OP2 not introspected. Run introspect_op2() first.")
|
||
|
|
|
||
|
|
# Suggest extractors based on available result types
|
||
|
|
engineering_features = []
|
||
|
|
|
||
|
|
if 'displacement' in self.op2_info.result_types:
|
||
|
|
engineering_features.append({
|
||
|
|
'action': 'extract_displacement',
|
||
|
|
'domain': 'result_extraction',
|
||
|
|
'description': 'Extract displacement results from OP2 file',
|
||
|
|
'params': {'result_type': 'displacement'}
|
||
|
|
})
|
||
|
|
|
||
|
|
if 'stress' in self.op2_info.result_types and self.op2_info.element_types:
|
||
|
|
# Use first available element type
|
||
|
|
element_type = self.op2_info.element_types[0].lower()
|
||
|
|
engineering_features.append({
|
||
|
|
'action': 'extract_solid_stress',
|
||
|
|
'domain': 'result_extraction',
|
||
|
|
'description': f'Extract stress from {element_type.upper()} elements',
|
||
|
|
'params': {
|
||
|
|
'result_type': 'stress',
|
||
|
|
'element_type': element_type
|
||
|
|
}
|
||
|
|
})
|
||
|
|
|
||
|
|
logger.info(f"Suggested configuration:")
|
||
|
|
logger.info(f" Engineering features: {len(engineering_features)}")
|
||
|
|
for feat in engineering_features:
|
||
|
|
logger.info(f" - {feat['action']}: {feat['description']}")
|
||
|
|
|
||
|
|
return {
|
||
|
|
'engineering_features': engineering_features,
|
||
|
|
'inline_calculations': [],
|
||
|
|
'post_processing_hooks': []
|
||
|
|
}
|
||
|
|
|
||
|
|
# =========================================================================
|
||
|
|
# STEP 5: Pipeline Validation (Dry Run)
|
||
|
|
# =========================================================================
|
||
|
|
|
||
|
|
    def validate_pipeline(self, llm_workflow: Dict[str, Any]) -> List[ValidationResult]:
        """
        Validate complete pipeline with baseline OP2 file.

        This executes the entire extraction/calculation/hook pipeline
        using the baseline OP2 to ensure everything works BEFORE
        starting the optimization.

        Args:
            llm_workflow: Complete LLM workflow configuration

        Returns:
            List of ValidationResult objects (one per extractor/calculation,
            plus hook and objective entries)

        Raises:
            ValueError: If run_baseline_simulation() has not produced an OP2.
        """
        logger.info("=" * 80)
        logger.info("STEP 5: Pipeline Validation (Dry Run)")
        logger.info("=" * 80)

        if self.baseline_op2 is None:
            raise ValueError("No baseline OP2 file. Run baseline simulation first.")

        results = []

        # Validate extractors
        logger.info("\nValidating extractors...")
        orchestrator = ExtractorOrchestrator(
            extractors_dir=self.output_dir / "generated_extractors"
        )

        extractors = orchestrator.process_llm_workflow(llm_workflow)
        extraction_results = {}

        for extractor in extractors:
            try:
                # Pass extractor params (like element_type) to execution
                result = orchestrator.execute_extractor(
                    extractor.name,
                    self.baseline_op2,
                    subcase=1,
                    **extractor.params  # Pass params from workflow (element_type, etc.)
                )
                extraction_results.update(result)

                results.append(ValidationResult(
                    success=True,
                    component='extractor',
                    message=f"[OK] {extractor.name}: {list(result.keys())}",
                    data=result
                ))
                logger.info(f" [OK] {extractor.name}: {list(result.keys())}")

            except Exception as e:
                # A failed extractor is recorded but does not abort validation;
                # the summary step reports the overall pass/fail.
                results.append(ValidationResult(
                    success=False,
                    component='extractor',
                    message=f"[FAIL] {extractor.name}: {str(e)}",
                    data=None
                ))
                logger.error(f" [FAIL] {extractor.name}: {str(e)}")

        # Validate inline calculations
        logger.info("\nValidating inline calculations...")
        inline_generator = InlineCodeGenerator()
        calculations = {}
        # NOTE(review): 'calculations' is empty here, so this is effectively a
        # copy of extraction_results; exec() below mutates this namespace.
        calc_namespace = {**extraction_results, **calculations}

        for calc_spec in llm_workflow.get('inline_calculations', []):
            try:
                generated = inline_generator.generate_from_llm_output(calc_spec)
                # SECURITY NOTE: exec() runs LLM-generated code. Acceptable only
                # because the workflow is operator-reviewed; never feed untrusted
                # input through this path.
                exec(generated.code, calc_namespace)

                # Extract newly created variables
                # NOTE(review): keys created by EARLIER calc specs are picked up
                # again on every iteration, so each success message lists the
                # cumulative key set, not just this spec's new keys.
                for key, value in calc_namespace.items():
                    if key not in extraction_results and not key.startswith('_'):
                        calculations[key] = value

                results.append(ValidationResult(
                    success=True,
                    component='calculation',
                    message=f"[OK] {calc_spec.get('action', 'calculation')}: Created {list(calculations.keys())}",
                    data=calculations
                ))
                logger.info(f" [OK] {calc_spec.get('action', 'calculation')}")

            except Exception as e:
                results.append(ValidationResult(
                    success=False,
                    component='calculation',
                    message=f"[FAIL] {calc_spec.get('action', 'calculation')}: {str(e)}",
                    data=None
                ))
                logger.error(f" [FAAIL] {calc_spec.get('action', 'calculation')}: {str(e)}".replace("FAAIL", "FAIL"))

        # Validate hooks
        logger.info("\nValidating hooks...")
        hook_manager = HookManager()

        # Load system hooks shipped alongside this module
        system_hooks_dir = Path(__file__).parent / 'plugins'
        if system_hooks_dir.exists():
            hook_manager.load_plugins_from_directory(system_hooks_dir)

        # Run post-calculation hooks against the baseline results (trial 0,
        # no design variables changed yet)
        hook_results = hook_manager.execute_hooks('post_calculation', {
            'trial_number': 0,
            'design_variables': {},
            'results': extraction_results,
            'calculations': calculations
        })

        if hook_results:
            results.append(ValidationResult(
                success=True,
                component='hook',
                message=f"[OK] Hooks executed: {len(hook_results)} results",
                data={'hook_results': hook_results}
            ))
            logger.info(f" [OK] Executed {len(hook_results)} hook(s)")

        # Check for objective
        logger.info("\nValidating objective...")
        objective = None

        # Prefer an explicit objective reported by a hook
        for hook_result in hook_results:
            if hook_result and 'objective' in hook_result:
                objective = hook_result['objective']
                break

        if objective is None:
            # Try to find objective in calculations or results
            for key in ['max_displacement', 'max_stress', 'max_von_mises']:
                if key in {**extraction_results, **calculations}:
                    objective = {**extraction_results, **calculations}[key]
                    logger.warning(f" [WARNING] No explicit objective, using: {key}")
                    break

        if objective is not None:
            results.append(ValidationResult(
                success=True,
                component='objective',
                message=f"[OK] Objective value: {objective}",
                data={'objective': objective}
            ))
            logger.info(f" [OK] Objective value: {objective}")
        else:
            results.append(ValidationResult(
                success=False,
                component='objective',
                message="[FAIL] Could not determine objective value",
                data=None
            ))
            logger.error(" [FAIL] Could not determine objective value")

        return results
|
||
|
|
|
||
|
|
# =========================================================================
|
||
|
|
# Complete Validation Workflow
|
||
|
|
# =========================================================================
|
||
|
|
|
||
|
|
def run_complete_validation(self, user_goal: str, llm_workflow: Optional[Dict[str, Any]] = None) -> Tuple[bool, List[ValidationResult]]:
|
||
|
|
"""
|
||
|
|
Run complete validation workflow from start to finish.
|
||
|
|
|
||
|
|
Steps:
|
||
|
|
1. Introspect model for expressions
|
||
|
|
2. Run baseline simulation
|
||
|
|
3. Introspect OP2 for contents
|
||
|
|
4. Suggest/validate configuration
|
||
|
|
5. Dry-run pipeline validation
|
||
|
|
|
||
|
|
Args:
|
||
|
|
user_goal: User's description of optimization goal
|
||
|
|
llm_workflow: Optional pre-configured workflow (otherwise suggested)
|
||
|
|
|
||
|
|
Returns:
|
||
|
|
Tuple of (success: bool, results: List[ValidationResult])
|
||
|
|
"""
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info("OPTIMIZATION SETUP WIZARD - COMPLETE VALIDATION")
|
||
|
|
logger.info("=" * 80)
|
||
|
|
|
||
|
|
# Step 1: Introspect model
|
||
|
|
self.introspect_model()
|
||
|
|
|
||
|
|
# Step 2: Run baseline
|
||
|
|
self.run_baseline_simulation()
|
||
|
|
|
||
|
|
# Step 3: Introspect OP2
|
||
|
|
self.introspect_op2()
|
||
|
|
|
||
|
|
# Step 4: Get configuration
|
||
|
|
if llm_workflow is None:
|
||
|
|
llm_workflow = self.suggest_configuration(user_goal)
|
||
|
|
|
||
|
|
# Step 5: Validate pipeline
|
||
|
|
validation_results = self.validate_pipeline(llm_workflow)
|
||
|
|
|
||
|
|
# Check if all validations passed
|
||
|
|
all_passed = all(r.success for r in validation_results)
|
||
|
|
|
||
|
|
logger.info("=" * 80)
|
||
|
|
logger.info("VALIDATION SUMMARY")
|
||
|
|
logger.info("=" * 80)
|
||
|
|
|
||
|
|
for result in validation_results:
|
||
|
|
logger.info(result.message)
|
||
|
|
|
||
|
|
if all_passed:
|
||
|
|
logger.info("\n[OK] ALL VALIDATIONS PASSED - Ready for optimization!")
|
||
|
|
else:
|
||
|
|
logger.error("\n[FAIL] VALIDATION FAILED - Fix issues before optimization")
|
||
|
|
|
||
|
|
return all_passed, validation_results
|
||
|
|
|
||
|
|
|
||
|
|
def main():
    """Test optimization setup wizard."""
    import sys

    banner = "=" * 80
    print(banner)
    print("Phase 3.3: Optimization Setup Wizard Test")
    print(banner)
    print()

    # Test fixture paths (relative to the repo root)
    prt_file = Path("tests/Bracket.prt")
    sim_file = Path("tests/Bracket_sim1.sim")

    # Guard clause: bail out early when the fixtures are missing.
    if not (prt_file.exists() and sim_file.exists()):
        print("ERROR: Test files not found")
        sys.exit(1)

    # Initialize wizard and run the complete validation workflow.
    wizard = OptimizationSetupWizard(prt_file, sim_file)
    user_goal = "Maximize displacement while keeping stress below yield/4"
    success, results = wizard.run_complete_validation(user_goal)

    if not success:
        print("\n[FAIL] Validation failed. Review errors above.")
        sys.exit(1)
    print("\n[OK] Pipeline validated! Ready to start optimization.")
|
||
|
|
|
||
|
|
|
||
|
|
if __name__ == '__main__':
    # Allow running this module directly as a smoke test.
    main()
|