BREAKING CHANGE: Module paths have been reorganized for better maintainability. Backwards compatibility aliases with deprecation warnings are provided. New Structure: - core/ - Optimization runners (runner, intelligent_optimizer, etc.) - processors/ - Data processing - surrogates/ - Neural network surrogates - nx/ - NX/Nastran integration (solver, updater, session_manager) - study/ - Study management (creator, wizard, state, reset) - reporting/ - Reports and analysis (visualizer, report_generator) - config/ - Configuration management (manager, builder) - utils/ - Utilities (logger, auto_doc, etc.) - future/ - Research/experimental code Migration: - ~200 import changes across 125 files - All __init__.py files use lazy loading to avoid circular imports - Backwards compatibility layer supports old import paths with warnings - All existing functionality preserved To migrate existing code: OLD: from optimization_engine.nx_solver import NXSolver NEW: from optimization_engine.nx.solver import NXSolver OLD: from optimization_engine.runner import OptimizationRunner NEW: from optimization_engine.core.runner import OptimizationRunner 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
330 lines
12 KiB
Python
330 lines
12 KiB
Python
"""
|
|
Pruning Logger - Comprehensive tracking of failed trials during optimization.
|
|
|
|
This module provides detailed logging of why trials are pruned, including:
|
|
- Validation failures
|
|
- Simulation failures
|
|
- OP2 extraction failures
|
|
- Parameter values at failure
|
|
- Error messages and stack traces
|
|
|
|
Usage:
|
|
logger = PruningLogger(results_dir=Path("studies/my_study/2_results"))
|
|
|
|
# Log different types of failures
|
|
logger.log_validation_failure(trial_number, params, reasons)
|
|
logger.log_simulation_failure(trial_number, params, error_msg)
|
|
logger.log_op2_extraction_failure(trial_number, params, exception, op2_file)
|
|
|
|
# Generate summary report
|
|
logger.save_summary()
|
|
"""
|
|
|
|
import json
|
|
import traceback
|
|
from pathlib import Path
|
|
from typing import Dict, List, Any, Optional
|
|
from datetime import datetime
|
|
|
|
|
|
class PruningLogger:
    """Comprehensive logger for tracking pruned trials during optimization.

    Every pruning event (validation failure, simulation failure, or OP2
    extraction failure) is appended to an in-memory list and persisted
    incrementally to ``pruning_history.json``.  ``save_summary()`` writes an
    aggregated report with failure-pattern counts and recommendations to
    ``pruning_summary.json``.
    """

    # Maps an event's 'pruning_cause' value to the stats counter it feeds.
    _CAUSE_TO_STAT = {
        'validation_failure': 'validation_failures',
        'simulation_failure': 'simulation_failures',
        'op2_extraction_failure': 'op2_extraction_failures',
    }

    def __init__(self, results_dir: Path, verbose: bool = True):
        """
        Initialize pruning logger.

        Args:
            results_dir: Directory to save pruning logs (typically 2_results/)
            verbose: Print pruning events to console
        """
        self.results_dir = Path(results_dir)
        self.results_dir.mkdir(parents=True, exist_ok=True)

        self.verbose = verbose

        # Log file paths
        self.pruning_log_file = self.results_dir / "pruning_history.json"
        self.pruning_summary_file = self.results_dir / "pruning_summary.json"

        # In-memory log of pruning event dicts
        self.pruning_events: List[Dict[str, Any]] = []

        # Load existing log if it exists so repeated runs accumulate events.
        # A corrupt or unreadable history file is treated as empty rather
        # than aborting the whole optimization run.
        if self.pruning_log_file.exists():
            try:
                with open(self.pruning_log_file, 'r', encoding='utf-8') as f:
                    self.pruning_events = json.load(f)
            except (json.JSONDecodeError, OSError):
                self.pruning_events = []

        # Statistics.  BUGFIX: these counters were previously always
        # initialized to zero, so a logger resumed from an existing history
        # file undercounted each category in save_summary()'s breakdown
        # (only 'total_pruned' was recomputed later).  Rebuild them from the
        # loaded events instead.
        self.stats = {
            'validation_failures': 0,
            'simulation_failures': 0,
            'op2_extraction_failures': 0,
            'total_pruned': len(self.pruning_events)
        }
        for event in self.pruning_events:
            stat_key = self._CAUSE_TO_STAT.get(event.get('pruning_cause'))
            if stat_key:
                self.stats[stat_key] += 1

    def log_validation_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        validation_warnings: List[str]
    ):
        """
        Log a trial that was pruned due to validation failure.

        Args:
            trial_number: Trial number
            design_variables: Parameter values that failed validation
            validation_warnings: List of validation error messages
        """
        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'validation_failure',
            'design_variables': design_variables,
            'validation_warnings': validation_warnings,
            'details': {
                'validator_rejected': True,
                'warning_count': len(validation_warnings)
            }
        }

        self._add_event(event)
        self.stats['validation_failures'] += 1

        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - Validation Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Reasons: {len(validation_warnings)} validation errors")
            for warning in validation_warnings:
                print(f"    - {warning}")

    def log_simulation_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        error_message: str,
        return_code: Optional[int] = None,
        solver_errors: Optional[List[str]] = None
    ):
        """
        Log a trial that was pruned due to simulation failure.

        Args:
            trial_number: Trial number
            design_variables: Parameter values
            error_message: Main error message
            return_code: Solver return code (if available)
            solver_errors: List of solver error messages from F06
        """
        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'simulation_failure',
            'design_variables': design_variables,
            'error_message': error_message,
            'details': {
                'return_code': return_code,
                'solver_errors': solver_errors if solver_errors else []
            }
        }

        self._add_event(event)
        self.stats['simulation_failures'] += 1

        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - Simulation Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Error: {error_message}")
            if return_code is not None:
                print(f"  Return code: {return_code}")
            if solver_errors:
                print(f"  Solver errors:")
                for err in solver_errors[:3]:  # Show first 3
                    print(f"    - {err}")

    def log_op2_extraction_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        exception: Exception,
        op2_file: Optional[Path] = None,
        f06_file: Optional[Path] = None
    ):
        """
        Log a trial that was pruned due to OP2 extraction failure.

        Also cross-checks the F06 file (when given): if pyNastran flagged a
        FATAL in the OP2 header but the F06 contains no FATAL text, the event
        is marked as a likely false positive.

        Args:
            trial_number: Trial number
            design_variables: Parameter values
            exception: The exception that was raised
            op2_file: Path to OP2 file (if exists)
            f06_file: Path to F06 file (for reference)
        """
        # BUGFIX: format the *passed* exception's traceback.  The previous
        # traceback.format_exc() formats whatever exception is currently
        # being handled and yields "NoneType: None" when this method is
        # called outside an except block.
        tb = ''.join(traceback.format_exception(
            type(exception), exception, exception.__traceback__))

        # Heuristic: a pyNastran OP2-reader FATAL flag (may be spurious)
        is_fatal_error = 'FATAL' in str(exception) and 'op2_reader' in tb

        # Check F06 for actual errors if provided; any read problem is
        # deliberately ignored (this check is best-effort diagnostics only).
        f06_has_fatal = False
        f06_errors = []
        if f06_file and f06_file.exists():
            try:
                with open(f06_file, 'r', encoding='latin-1', errors='ignore') as f:
                    f06_content = f.read()
                f06_has_fatal = 'FATAL' in f06_content
                # Extract fatal errors
                for line in f06_content.split('\n'):
                    if 'FATAL' in line.upper() or 'ERROR' in line.upper():
                        f06_errors.append(line.strip())
            except Exception:
                pass

        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'op2_extraction_failure',
            'design_variables': design_variables,
            'exception_type': type(exception).__name__,
            'exception_message': str(exception),
            'stack_trace': tb,
            'details': {
                'op2_file': str(op2_file) if op2_file else None,
                'op2_exists': op2_file.exists() if op2_file else False,
                'op2_size_bytes': op2_file.stat().st_size if (op2_file and op2_file.exists()) else 0,
                'f06_file': str(f06_file) if f06_file else None,
                'is_pynastran_fatal_flag': is_fatal_error,
                'f06_has_fatal_errors': f06_has_fatal,
                'f06_errors': f06_errors[:5]  # First 5 errors
            }
        }

        self._add_event(event)
        self.stats['op2_extraction_failures'] += 1

        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - OP2 Extraction Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Exception: {type(exception).__name__}: {str(exception)}")
            if is_fatal_error and not f06_has_fatal:
                print(f"  WARNING: pyNastran detected FATAL flag in OP2 header")
                print(f"           BUT F06 file has NO FATAL errors!")
                print(f"           This is likely a false positive - simulation may have succeeded")
            if op2_file:
                print(f"  OP2 file: {op2_file.name} ({'exists' if op2_file.exists() else 'missing'})")
                if op2_file.exists():
                    print(f"  OP2 size: {op2_file.stat().st_size:,} bytes")

    def _add_event(self, event: Dict[str, Any]):
        """Add event to the in-memory log and persist it to disk."""
        self.pruning_events.append(event)
        self.stats['total_pruned'] = len(self.pruning_events)

        # Save incrementally so a crash never loses logged events
        self._save_log()

    def _save_log(self):
        """Save pruning log to disk as pretty-printed JSON."""
        with open(self.pruning_log_file, 'w', encoding='utf-8') as f:
            json.dump(self.pruning_events, f, indent=2, ensure_ascii=False)

    def save_summary(self) -> Dict[str, Any]:
        """
        Generate and save pruning summary report.

        Aggregates failure reasons across all logged events, counts likely
        OP2 false positives, writes the report to pruning_summary.json and
        (when verbose) prints a condensed version to the console.

        Returns:
            Summary dictionary
        """
        # Analyze patterns
        validation_reasons: Dict[str, int] = {}
        simulation_errors: Dict[str, int] = {}
        op2_false_positives = 0

        for event in self.pruning_events:
            if event['pruning_cause'] == 'validation_failure':
                for warning in event['validation_warnings']:
                    validation_reasons[warning] = validation_reasons.get(warning, 0) + 1

            elif event['pruning_cause'] == 'simulation_failure':
                error = event['error_message']
                simulation_errors[error] = simulation_errors.get(error, 0) + 1

            elif event['pruning_cause'] == 'op2_extraction_failure':
                # FATAL flag from pyNastran but a clean F06 => false positive
                if event['details'].get('is_pynastran_fatal_flag') and not event['details'].get('f06_has_fatal_errors'):
                    op2_false_positives += 1

        summary = {
            'generated': datetime.now().isoformat(),
            'total_pruned_trials': self.stats['total_pruned'],
            'breakdown': {
                'validation_failures': self.stats['validation_failures'],
                'simulation_failures': self.stats['simulation_failures'],
                'op2_extraction_failures': self.stats['op2_extraction_failures']
            },
            'validation_failure_reasons': validation_reasons,
            'simulation_failure_types': simulation_errors,
            'op2_extraction_analysis': {
                'total_op2_failures': self.stats['op2_extraction_failures'],
                'likely_false_positives': op2_false_positives,
                'description': 'False positives are OP2 extraction failures where pyNastran detected FATAL flag but F06 has no errors'
            },
            'recommendations': self._generate_recommendations(op2_false_positives)
        }

        # Save summary
        with open(self.pruning_summary_file, 'w', encoding='utf-8') as f:
            json.dump(summary, f, indent=2, ensure_ascii=False)

        if self.verbose:
            print(f"\n[PRUNING SUMMARY] Saved to {self.pruning_summary_file}")
            print(f"  Total pruned: {summary['total_pruned_trials']}")
            print(f"  Validation failures: {summary['breakdown']['validation_failures']}")
            print(f"  Simulation failures: {summary['breakdown']['simulation_failures']}")
            print(f"  OP2 extraction failures: {summary['breakdown']['op2_extraction_failures']}")
            if op2_false_positives > 0:
                print(f"\n  WARNING: {op2_false_positives} likely FALSE POSITIVES detected!")
                print(f"           These are pyNastran OP2 reader issues, not real failures")

        return summary

    def _generate_recommendations(self, op2_false_positives: int) -> List[str]:
        """Generate actionable recommendations based on pruning patterns."""
        recommendations = []

        if op2_false_positives > 0:
            recommendations.append(
                f"CRITICAL: {op2_false_positives} trials failed due to pyNastran OP2 reader being overly strict. "
                f"Use robust_extract_first_frequency() to ignore benign FATAL flags and extract valid results."
            )

        if self.stats['validation_failures'] == 0 and self.stats['simulation_failures'] > 0:
            recommendations.append(
                "Consider adding validation rules to catch simulation failures earlier "
                "(saves ~30 seconds per invalid trial)."
            )

        if self.stats['total_pruned'] == 0:
            recommendations.append("Excellent! No pruning detected - all trials succeeded.")

        return recommendations

    def _format_params(self, params: Dict[str, float]) -> str:
        """Format parameters as 'name=value' pairs for console display.

        NOTE(review): assumes all values support the ':.2f' float format —
        non-numeric values would raise here; confirm against callers.
        """
        return ", ".join(f"{k}={v:.2f}" for k, v in params.items())
|
|
|
|
|
|
def create_pruning_logger(results_dir: Path, verbose: bool = True) -> PruningLogger:
    """Build a PruningLogger for the given study results directory.

    Thin convenience wrapper around the PruningLogger constructor.

    Args:
        results_dir: Results directory for the study
        verbose: Print pruning events to console

    Returns:
        PruningLogger instance
    """
    return PruningLogger(results_dir=results_dir, verbose=verbose)
|