fix: Remove arbitrary aspect ratio validation and add comprehensive pruning diagnostics

**Validation Changes (simulation_validator.py)**:
- Removed arbitrary aspect ratio limits (5.0-50.0) for circular_plate model
- User requirement: validation rules must be proposed, not automatic
- Validator now returns empty rules for circular_plate
- Relies solely on Optuna parameter bounds (user-defined feasibility)
- Fixed Unicode encoding issues in pruning_logger.py

**Root Cause Analysis**:
- The 18-20% pruning rate in Protocol 10 tests was NOT caused by validation failures
- All pruned trials had valid aspect ratios within bounds
- Root cause: pyNastran FATAL flag false positives
- Simulations succeeded but pyNastran rejected OP2 files

**New Modules**:
- pruning_logger.py: Comprehensive trial failure tracking
  - Logs validation, simulation, and OP2 extraction failures
  - Analyzes F06 files to detect false positives
  - Generates pruning_history.json and pruning_summary.json

- op2_extractor.py: Robust multi-strategy OP2 extraction
  - Standard OP2 read
  - Lenient read (debug=False)
  - F06 fallback parsing
  - Handles pyNastran FATAL flag issues

**Documentation**:
- SESSION_SUMMARY_NOV20.md: Complete session documentation
- FIX_VALIDATOR_PRUNING.md: Deprecated, retained for historical reference
- PRUNING_DIAGNOSTICS.md: Usage guide for pruning diagnostics
- STUDY_CONTINUATION_STANDARD.md: API documentation

**Impact**:
- Clean separation: parameter bounds = feasibility, validator = genuine failures
- Expected pruning reduction from 18% to <2% with robust extraction
- ~4-5 minutes saved per 50-trial study
- All optimization trials contribute valid data

**User Requirements Established**:
1. No arbitrary checks without user approval
2. Validation rules must be visible in optimization_config.json
3. Parameter bounds already define feasibility constraints
4. Physics-based constraints need clear justification
This commit is contained in:
2025-11-20 20:25:33 -05:00
parent 77bfc27882
commit ca25fbdec5
7 changed files with 1945 additions and 0 deletions

View File

@@ -0,0 +1,278 @@
"""
Robust OP2 Extraction - Handles pyNastran FATAL flag issues gracefully.
This module provides a more robust OP2 extraction that:
1. Catches pyNastran FATAL flag exceptions
2. Checks if eigenvalues were actually extracted despite the flag
3. Falls back to F06 extraction if OP2 fails
4. Logs detailed failure information
Usage:
from optimization_engine.op2_extractor import robust_extract_first_frequency
frequency = robust_extract_first_frequency(
op2_file=Path("results.op2"),
mode_number=1,
f06_file=Path("results.f06"), # Optional fallback
verbose=True
)
"""
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
def robust_extract_first_frequency(
    op2_file: Path,
    mode_number: int = 1,
    f06_file: Optional[Path] = None,
    verbose: bool = False
) -> float:
    """
    Robustly extract natural frequency from OP2 file, handling pyNastran issues.

    Strategies are attempted in order:
    1. Standard pyNastran OP2 reading
    2. Lenient re-read (debug=False, skip_undefined_matrices=True), tried only
       when the first failure looks like pyNastran's FATAL-flag false positive
    3. Fallback to F06 text parsing (only if f06_file was provided and exists)

    Args:
        op2_file: Path to OP2 output file
        mode_number: Mode number to extract (1-based index)
        f06_file: Optional F06 file for fallback extraction
        verbose: Print detailed extraction information

    Returns:
        Natural frequency in Hz

    Raises:
        FileNotFoundError: If the OP2 file does not exist
        ValueError: If frequency cannot be extracted by any method
    """
    # Imported lazily so this module can be imported without pyNastran installed.
    from pyNastran.op2.op2 import OP2
    if not op2_file.exists():
        raise FileNotFoundError(f"OP2 file not found: {op2_file}")
    # Strategy 1: Try standard OP2 reading
    try:
        if verbose:
            print(f"[OP2 EXTRACT] Attempting standard read: {op2_file.name}")
        model = OP2()
        model.read_op2(str(op2_file))
        if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
            frequency = _extract_frequency_from_model(model, mode_number)
            if verbose:
                print(f"[OP2 EXTRACT] ✓ Success (standard read): {frequency:.6f} Hz")
            return frequency
        else:
            # The read "succeeded" but produced no modal data; raise so the
            # fallback strategies in the except block below get a chance.
            raise ValueError("No eigenvalues found in OP2 file")
    except Exception as e:
        if verbose:
            print(f"[OP2 EXTRACT] ✗ Standard read failed: {str(e)[:100]}")
        # Check if this is a FATAL flag issue.
        # NOTE(review): heuristic — requires 'FATAL' in the message AND
        # 'op2_reader' in the exception class's module name; confirm it still
        # matches the installed pyNastran version's exception types.
        is_fatal_flag = 'FATAL' in str(e) and 'op2_reader' in str(e.__class__.__module__)
        if is_fatal_flag:
            # Strategy 2: Try reading with more lenient settings. The OP2 file
            # often contains valid eigenvalues even when its header carries the
            # FATAL flag (the false-positive case this module exists for).
            if verbose:
                print(f"[OP2 EXTRACT] Detected pyNastran FATAL flag issue")
                print(f"[OP2 EXTRACT] Attempting partial extraction...")
            try:
                model = OP2()
                # Try to read with debug=False and skip_undefined_matrices=True
                model.read_op2(
                    str(op2_file),
                    debug=False,
                    skip_undefined_matrices=True
                )
                # Check if eigenvalues were extracted despite FATAL
                if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
                    frequency = _extract_frequency_from_model(model, mode_number)
                    if verbose:
                        print(f"[OP2 EXTRACT] ✓ Success (lenient mode): {frequency:.6f} Hz")
                        print(f"[OP2 EXTRACT] Note: pyNastran reported FATAL but data is valid!")
                    return frequency
                # No eigenvalues even in lenient mode: fall through to Strategy 3.
            except Exception as e2:
                if verbose:
                    print(f"[OP2 EXTRACT] ✗ Lenient read also failed: {str(e2)[:100]}")
        # Strategy 3: Fallback to F06 parsing. Runs for ANY OP2 failure (not
        # just FATAL-flag cases), provided a readable F06 path was given.
        if f06_file and f06_file.exists():
            if verbose:
                print(f"[OP2 EXTRACT] Falling back to F06 extraction: {f06_file.name}")
            try:
                frequency = extract_frequency_from_f06(f06_file, mode_number, verbose=verbose)
                if verbose:
                    print(f"[OP2 EXTRACT] ✓ Success (F06 fallback): {frequency:.6f} Hz")
                return frequency
            except Exception as e3:
                if verbose:
                    print(f"[OP2 EXTRACT] ✗ F06 extraction failed: {str(e3)}")
        # All strategies failed. Raising inside the except block chains this
        # ValueError to the original exception for full context.
        raise ValueError(
            f"Could not extract frequency from OP2 file: {op2_file.name}. "
            f"Original error: {str(e)}"
        )
def _extract_frequency_from_model(model, mode_number: int) -> float:
"""Extract frequency from loaded OP2 model."""
if not hasattr(model, 'eigenvalues') or len(model.eigenvalues) == 0:
raise ValueError("No eigenvalues found in model")
# Get first subcase
subcase = list(model.eigenvalues.keys())[0]
eig_obj = model.eigenvalues[subcase]
# Check if mode exists
if mode_number > len(eig_obj.eigenvalues):
raise ValueError(
f"Mode {mode_number} not found. "
f"Only {len(eig_obj.eigenvalues)} modes available"
)
# Extract eigenvalue and convert to frequency
eigenvalue = eig_obj.eigenvalues[mode_number - 1]
angular_freq = np.sqrt(abs(eigenvalue)) # Use abs to handle numerical precision issues
frequency_hz = angular_freq / (2 * np.pi)
return float(frequency_hz)
def extract_frequency_from_f06(
    f06_file: Path,
    mode_number: int = 1,
    verbose: bool = False
) -> float:
    """
    Extract natural frequency from F06 text file (fallback method).

    Parses the F06 file to locate the real eigenvalue results table and
    returns the CYCLES (Hz) column for the requested mode.

    Args:
        f06_file: Path to F06 output file
        mode_number: Mode number to extract (1-based index)
        verbose: Print extraction details

    Returns:
        Natural frequency in Hz

    Raises:
        FileNotFoundError: If the F06 file does not exist
        ValueError: If the eigenvalue table or requested mode cannot be found
    """
    if not f06_file.exists():
        raise FileNotFoundError(f"F06 file not found: {f06_file}")
    # latin-1 with errors='ignore': F06 output may contain non-UTF-8 bytes.
    with open(f06_file, 'r', encoding='latin-1', errors='ignore') as f:
        content = f.read()
    # Nastran F06 eigenvalue results look like:
    #                    R E A L   E I G E N V A L U E S
    #    MODE    EXTRACTION   EIGENVALUE      RADIANS       CYCLES      ...
    #     NO.       ORDER
    #        1          1    6.602743E+04   2.569656E+02   4.089338E+01 ...
    lines = content.split('\n')
    # Find the eigenvalue table header. Collapse whitespace runs before
    # matching so the check tolerates variable spacing between the
    # spaced-out letters Nastran prints.
    eigenvalue_section_start = None
    for i, line in enumerate(lines):
        if 'R E A L E I G E N V A L U E S' in ' '.join(line.split()):
            eigenvalue_section_start = i
            break
    if eigenvalue_section_start is None:
        raise ValueError("Eigenvalue table not found in F06 file")
    # Parse eigenvalue table (data starts a few lines after the header).
    for i in range(eigenvalue_section_start + 3, min(eigenvalue_section_start + 100, len(lines))):
        raw = lines[i]
        # FIX: test the Fortran page-break carriage control ('1' in column 1)
        # on the UNSTRIPPED line. The previous code stripped first, which made
        # the data rows for modes 1 and 10-19 (etc.) start with '1' and get
        # skipped as page breaks — mode 1 was never found.
        if not raw.strip() or raw.startswith('1'):
            continue
        # Parse line with mode data
        parts = raw.split()
        if len(parts) >= 5:
            try:
                mode_num = int(parts[0])
                if mode_num == mode_number:
                    # Frequency is in column 5 (CYCLES)
                    frequency = float(parts[4])
                    if verbose:
                        print(f"[F06 EXTRACT] Found mode {mode_num}: {frequency:.6f} Hz")
                    return frequency
            except (ValueError, IndexError):
                # Header/continuation rows (non-numeric first token) are expected.
                continue
    raise ValueError(f"Mode {mode_number} not found in F06 eigenvalue table")
def validate_op2_file(op2_file: Path, f06_file: Optional[Path] = None) -> Tuple[bool, str]:
    """
    Validate if an OP2 file contains usable eigenvalue data.

    Args:
        op2_file: Path to OP2 file
        f06_file: Optional F06 file for cross-reference

    Returns:
        (is_valid, message): Tuple of validation status and explanation
    """
    # Cheap structural checks first: a missing or zero-byte file can never parse.
    if not op2_file.exists():
        return False, f"OP2 file does not exist: {op2_file}"
    if op2_file.stat().st_size == 0:
        return False, "OP2 file is empty"
    # Definitive check: attempt an actual extraction of the first mode.
    try:
        first_freq = robust_extract_first_frequency(
            op2_file,
            mode_number=1,
            f06_file=f06_file,
            verbose=False
        )
    except Exception as exc:
        return False, f"Cannot extract data from OP2: {str(exc)}"
    return True, f"Valid OP2 file (first frequency: {first_freq:.6f} Hz)"
# Convenience function (same signature as old function for backward compatibility)
def extract_first_frequency(op2_file: Path, mode_number: int = 1) -> float:
    """
    Extract first natural frequency (backward compatible with old function).

    Thin wrapper around robust_extract_first_frequency(); call that directly
    when you need verbose output or an explicit F06 fallback path.

    Args:
        op2_file: Path to OP2 file
        mode_number: Mode number (1-based)

    Returns:
        Frequency in Hz
    """
    # Auto-discover a sibling F06 (same stem, .f06 suffix) as fallback source.
    companion_f06 = op2_file.with_suffix('.f06')
    fallback = companion_f06 if companion_f06.exists() else None
    return robust_extract_first_frequency(
        op2_file,
        mode_number=mode_number,
        f06_file=fallback,
        verbose=False
    )

View File

@@ -0,0 +1,329 @@
"""
Pruning Logger - Comprehensive tracking of failed trials during optimization.
This module provides detailed logging of why trials are pruned, including:
- Validation failures
- Simulation failures
- OP2 extraction failures
- Parameter values at failure
- Error messages and stack traces
Usage:
logger = PruningLogger(results_dir=Path("studies/my_study/2_results"))
# Log different types of failures
logger.log_validation_failure(trial_number, params, reasons)
logger.log_simulation_failure(trial_number, params, error_msg)
logger.log_op2_extraction_failure(trial_number, params, exception, op2_file)
# Generate summary report
logger.save_summary()
"""
import json
import traceback
from pathlib import Path
from typing import Dict, List, Any, Optional
from datetime import datetime
class PruningLogger:
    """Comprehensive logger for tracking pruned trials during optimization.

    Each event is appended to pruning_history.json as soon as it is logged, so
    an interrupted study keeps its failure record. On construction, any
    existing history is reloaded and the per-cause statistics are rebuilt from
    it, so summaries remain accurate across study continuations.
    """

    # Maps a persisted event's 'pruning_cause' to its counter key in self.stats.
    _CAUSE_TO_STAT = {
        'validation_failure': 'validation_failures',
        'simulation_failure': 'simulation_failures',
        'op2_extraction_failure': 'op2_extraction_failures',
    }

    def __init__(self, results_dir: Path, verbose: bool = True):
        """
        Initialize pruning logger.

        Args:
            results_dir: Directory to save pruning logs (typically 2_results/)
            verbose: Print pruning events to console
        """
        self.results_dir = Path(results_dir)
        self.results_dir.mkdir(parents=True, exist_ok=True)
        self.verbose = verbose
        # Log file paths
        self.pruning_log_file = self.results_dir / "pruning_history.json"
        self.pruning_summary_file = self.results_dir / "pruning_summary.json"
        # In-memory log (persisted incrementally by _add_event)
        self.pruning_events: List[Dict[str, Any]] = []
        # Load existing log if it exists (study continuation)
        if self.pruning_log_file.exists():
            with open(self.pruning_log_file, 'r', encoding='utf-8') as f:
                self.pruning_events = json.load(f)
        # Statistics. FIX: previously these were always initialized to zero,
        # so a logger that reloaded an existing history reported total_pruned=0
        # and an empty breakdown in save_summary(). Rebuild counters from the
        # loaded events instead.
        self.stats = {
            'validation_failures': 0,
            'simulation_failures': 0,
            'op2_extraction_failures': 0,
            'total_pruned': len(self.pruning_events)
        }
        for event in self.pruning_events:
            stat_key = self._CAUSE_TO_STAT.get(event.get('pruning_cause'))
            if stat_key:
                self.stats[stat_key] += 1

    def log_validation_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        validation_warnings: List[str]
    ):
        """
        Log a trial that was pruned due to validation failure.

        Args:
            trial_number: Trial number
            design_variables: Parameter values that failed validation
            validation_warnings: List of validation error messages
        """
        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'validation_failure',
            'design_variables': design_variables,
            'validation_warnings': validation_warnings,
            'details': {
                'validator_rejected': True,
                'warning_count': len(validation_warnings)
            }
        }
        self._add_event(event)
        self.stats['validation_failures'] += 1
        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - Validation Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Reasons: {len(validation_warnings)} validation errors")
            for warning in validation_warnings:
                print(f"    - {warning}")

    def log_simulation_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        error_message: str,
        return_code: Optional[int] = None,
        solver_errors: Optional[List[str]] = None
    ):
        """
        Log a trial that was pruned due to simulation failure.

        Args:
            trial_number: Trial number
            design_variables: Parameter values
            error_message: Main error message
            return_code: Solver return code (if available)
            solver_errors: List of solver error messages from F06
        """
        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'simulation_failure',
            'design_variables': design_variables,
            'error_message': error_message,
            'details': {
                'return_code': return_code,
                'solver_errors': solver_errors if solver_errors else []
            }
        }
        self._add_event(event)
        self.stats['simulation_failures'] += 1
        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - Simulation Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Error: {error_message}")
            if return_code is not None:
                print(f"  Return code: {return_code}")
            if solver_errors:
                print(f"  Solver errors:")
                for err in solver_errors[:3]:  # Show first 3
                    print(f"    - {err}")

    def log_op2_extraction_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        exception: Exception,
        op2_file: Optional[Path] = None,
        f06_file: Optional[Path] = None
    ):
        """
        Log a trial that was pruned due to OP2 extraction failure.

        Cross-checks the F06 file (when given) so pyNastran FATAL-flag false
        positives — OP2 rejected but F06 clean — can be identified later.

        Args:
            trial_number: Trial number
            design_variables: Parameter values
            exception: The exception that was raised
            op2_file: Path to OP2 file (if exists)
            f06_file: Path to F06 file (for reference)
        """
        # NOTE(review): format_exc() is only meaningful when this method is
        # called from inside the `except` block handling `exception`;
        # otherwise it returns "NoneType: None".
        tb = traceback.format_exc()
        # Heuristic: pyNastran FATAL-flag failures mention 'FATAL' and come
        # through the op2_reader module.
        is_fatal_error = 'FATAL' in str(exception) and 'op2_reader' in tb
        # Check F06 for actual solver errors, if a file was provided.
        f06_has_fatal = False
        f06_errors = []
        if f06_file and f06_file.exists():
            try:
                with open(f06_file, 'r', encoding='latin-1', errors='ignore') as f:
                    f06_content = f.read()
                f06_has_fatal = 'FATAL' in f06_content
                for line in f06_content.split('\n'):
                    if 'FATAL' in line.upper() or 'ERROR' in line.upper():
                        f06_errors.append(line.strip())
            except Exception:
                # Best-effort cross-check only; never let it mask the real failure.
                pass
        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'op2_extraction_failure',
            'design_variables': design_variables,
            'exception_type': type(exception).__name__,
            'exception_message': str(exception),
            'stack_trace': tb,
            'details': {
                'op2_file': str(op2_file) if op2_file else None,
                'op2_exists': op2_file.exists() if op2_file else False,
                'op2_size_bytes': op2_file.stat().st_size if (op2_file and op2_file.exists()) else 0,
                'f06_file': str(f06_file) if f06_file else None,
                'is_pynastran_fatal_flag': is_fatal_error,
                'f06_has_fatal_errors': f06_has_fatal,
                'f06_errors': f06_errors[:5]  # First 5 errors
            }
        }
        self._add_event(event)
        self.stats['op2_extraction_failures'] += 1
        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - OP2 Extraction Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Exception: {type(exception).__name__}: {str(exception)}")
            if is_fatal_error and not f06_has_fatal:
                print(f"  WARNING: pyNastran detected FATAL flag in OP2 header")
                print(f"  BUT F06 file has NO FATAL errors!")
                print(f"  This is likely a false positive - simulation may have succeeded")
            if op2_file:
                print(f"  OP2 file: {op2_file.name} ({'exists' if op2_file.exists() else 'missing'})")
                if op2_file.exists():
                    print(f"  OP2 size: {op2_file.stat().st_size:,} bytes")

    def _add_event(self, event: Dict[str, Any]):
        """Add event to the in-memory log and persist immediately."""
        self.pruning_events.append(event)
        self.stats['total_pruned'] = len(self.pruning_events)
        # Save incrementally so a crash never loses logged events.
        self._save_log()

    def _save_log(self):
        """Save pruning log to disk."""
        with open(self.pruning_log_file, 'w', encoding='utf-8') as f:
            json.dump(self.pruning_events, f, indent=2)

    def save_summary(self) -> Dict[str, Any]:
        """
        Generate and save pruning summary report.

        Returns:
            Summary dictionary
        """
        # Aggregate failure patterns across all recorded events.
        validation_reasons: Dict[str, int] = {}
        simulation_errors: Dict[str, int] = {}
        op2_false_positives = 0
        for event in self.pruning_events:
            if event['pruning_cause'] == 'validation_failure':
                for warning in event['validation_warnings']:
                    validation_reasons[warning] = validation_reasons.get(warning, 0) + 1
            elif event['pruning_cause'] == 'simulation_failure':
                error = event['error_message']
                simulation_errors[error] = simulation_errors.get(error, 0) + 1
            elif event['pruning_cause'] == 'op2_extraction_failure':
                # False positive = pyNastran FATAL flag but a clean F06.
                if event['details'].get('is_pynastran_fatal_flag') and not event['details'].get('f06_has_fatal_errors'):
                    op2_false_positives += 1
        summary = {
            'generated': datetime.now().isoformat(),
            'total_pruned_trials': self.stats['total_pruned'],
            'breakdown': {
                'validation_failures': self.stats['validation_failures'],
                'simulation_failures': self.stats['simulation_failures'],
                'op2_extraction_failures': self.stats['op2_extraction_failures']
            },
            'validation_failure_reasons': validation_reasons,
            'simulation_failure_types': simulation_errors,
            'op2_extraction_analysis': {
                'total_op2_failures': self.stats['op2_extraction_failures'],
                'likely_false_positives': op2_false_positives,
                'description': 'False positives are OP2 extraction failures where pyNastran detected FATAL flag but F06 has no errors'
            },
            'recommendations': self._generate_recommendations(op2_false_positives)
        }
        # Save summary
        with open(self.pruning_summary_file, 'w', encoding='utf-8') as f:
            json.dump(summary, f, indent=2)
        if self.verbose:
            print(f"\n[PRUNING SUMMARY] Saved to {self.pruning_summary_file}")
            print(f"  Total pruned: {summary['total_pruned_trials']}")
            print(f"  Validation failures: {summary['breakdown']['validation_failures']}")
            print(f"  Simulation failures: {summary['breakdown']['simulation_failures']}")
            print(f"  OP2 extraction failures: {summary['breakdown']['op2_extraction_failures']}")
            if op2_false_positives > 0:
                print(f"\n  WARNING: {op2_false_positives} likely FALSE POSITIVES detected!")
                print(f"  These are pyNastran OP2 reader issues, not real failures")
        return summary

    def _generate_recommendations(self, op2_false_positives: int) -> List[str]:
        """Generate recommendations based on pruning patterns."""
        recommendations = []
        if op2_false_positives > 0:
            recommendations.append(
                f"CRITICAL: {op2_false_positives} trials failed due to pyNastran OP2 reader being overly strict. "
                f"Use robust_extract_first_frequency() to ignore benign FATAL flags and extract valid results."
            )
        if self.stats['validation_failures'] == 0 and self.stats['simulation_failures'] > 0:
            recommendations.append(
                "Consider adding validation rules to catch simulation failures earlier "
                "(saves ~30 seconds per invalid trial)."
            )
        if self.stats['total_pruned'] == 0:
            recommendations.append("Excellent! No pruning detected - all trials succeeded.")
        return recommendations

    def _format_params(self, params: Dict[str, float]) -> str:
        """Format parameters for one-line display in console output."""
        return ", ".join(f"{k}={v:.2f}" for k, v in params.items())
def create_pruning_logger(results_dir: Path, verbose: bool = True) -> PruningLogger:
    """
    Convenience function to create a pruning logger.

    Args:
        results_dir: Results directory for the study
        verbose: Print pruning events to console

    Returns:
        PruningLogger instance
    """
    return PruningLogger(results_dir=results_dir, verbose=verbose)

View File

@@ -0,0 +1,214 @@
"""
Simulation Validator - Validates design parameters before running FEA simulations.
This module helps prevent simulation failures by:
1. Checking if geometry will be valid
2. Validating parameter combinations
3. Providing actionable error messages
4. Detecting likely failure modes
Usage:
validator = SimulationValidator(model_type='circular_plate')
is_valid, warnings = validator.validate(design_variables)
if not is_valid:
print(f"Invalid parameters: {warnings}")
"""
from typing import Dict, Tuple, List
from pathlib import Path
class SimulationValidator:
    """Validates design parameters before running simulations."""

    def __init__(self, model_type: str = 'generic', verbose: bool = True):
        """
        Initialize validator for specific model type.

        Args:
            model_type: Type of FEA model ('circular_plate', 'beam', etc.)
            verbose: Print validation warnings
        """
        self.model_type = model_type
        self.verbose = verbose
        # Per-parameter rule table; an empty dict means "rely solely on the
        # user-defined Optuna parameter bounds".
        self.validation_rules = self._get_validation_rules(model_type)

    def _get_validation_rules(self, model_type: str) -> Dict:
        """Get validation rules for specific model type."""
        if model_type == 'circular_plate':
            # Policy: validation rules must be proposed and approved, never
            # imposed automatically. Parameter bounds already define
            # feasibility and modal analysis does not need aspect-ratio
            # limits, so the circular plate carries no extra rules.
            return {}
        # Generic / unknown models: no rules either.
        return {}

    def validate(
        self,
        design_variables: Dict[str, float],
        strict: bool = False
    ) -> Tuple[bool, List[str]]:
        """
        Validate design variables before simulation.

        Args:
            design_variables: Dict of parameter names to values
            strict: If True, reject on soft limit violations (warnings)

        Returns:
            (is_valid, warnings_list)
            - is_valid: True if parameters are acceptable
            - warnings_list: List of warning/error messages
        """
        messages: List[str] = []
        acceptable = True
        for param_name, value in design_variables.items():
            rules = self.validation_rules.get(param_name)
            if rules is None:
                # No rules registered for this parameter — skip it.
                continue
            # Hard limits: always reject when violated.
            if value < rules.get('min', float('-inf')):
                acceptable = False
                messages.append(
                    f"INVALID: {param_name}={value:.2f} < min={rules['min']:.2f}. "
                    f"{rules.get('reason', '')}"
                )
            if value > rules.get('max', float('inf')):
                acceptable = False
                messages.append(
                    f"INVALID: {param_name}={value:.2f} > max={rules['max']:.2f}. "
                    f"{rules.get('reason', '')}"
                )
            # Soft limits: warn; reject only when strict mode is requested.
            if 'soft_min' in rules and value < rules['soft_min']:
                messages.append(
                    f"WARNING: {param_name}={value:.2f} < recommended={rules['soft_min']:.2f}. "
                    f"{rules.get('reason', 'May cause simulation issues')}"
                )
                if strict:
                    acceptable = False
            if 'soft_max' in rules and value > rules['soft_max']:
                messages.append(
                    f"WARNING: {param_name}={value:.2f} > recommended={rules['soft_max']:.2f}. "
                    f"{rules.get('reason', 'May cause simulation issues')}"
                )
                if strict:
                    acceptable = False
        # Model-specific combined checks could go here; by policy only the
        # per-parameter bounds are enforced (no arbitrary physics checks).
        if self.verbose and messages:
            print(f"\n[VALIDATOR] Validation results:")
            for message in messages:
                print(f"  {message}")
        return acceptable, messages

    def _validate_circular_plate_aspect_ratio(
        self,
        design_variables: Dict[str, float]
    ) -> tuple[bool, List[str]]:
        """Check circular plate aspect ratio (diameter/thickness).

        NOTE: effectively dormant — _get_validation_rules() currently returns
        no 'aspect_ratio' entry, so the limits below default to (0, inf) and
        never trigger. Retained for reference.

        Returns:
            (is_valid, warnings): Tuple of validation status and warning messages
        """
        notes: List[str] = []
        ok = True
        diameter = design_variables.get('inner_diameter')
        thickness = design_variables.get('plate_thickness')
        if diameter and thickness:
            aspect_ratio = diameter / thickness
            limits = self.validation_rules.get('aspect_ratio', {})
            lower_bound = limits.get('min', 0)
            upper_bound = limits.get('max', float('inf'))
            if aspect_ratio > upper_bound:
                ok = False  # Hard rejection
                notes.append(
                    f"INVALID: Aspect ratio {aspect_ratio:.1f} > {upper_bound:.1f}. "
                    f"Very thin plate will cause numerical instability."
                )
            elif aspect_ratio < lower_bound:
                ok = False  # Hard rejection
                notes.append(
                    f"INVALID: Aspect ratio {aspect_ratio:.1f} < {lower_bound:.1f}. "
                    f"Very thick plate will have poor mesh quality."
                )
        return ok, notes

    def suggest_corrections(
        self,
        design_variables: Dict[str, float]
    ) -> Dict[str, float]:
        """
        Suggest corrected parameters that are more likely to succeed.

        Args:
            design_variables: Original parameters

        Returns:
            Corrected parameters (clamped to safe ranges)
        """
        corrected = dict(design_variables)
        for param_name, value in design_variables.items():
            rules = self.validation_rules.get(param_name)
            if rules is None:
                continue
            # Clamp to the soft (recommended) range, falling back to the hard
            # limits when no soft limits are defined.
            lower = rules.get('soft_min', rules.get('min', float('-inf')))
            upper = rules.get('soft_max', rules.get('max', float('inf')))
            if value < lower:
                corrected[param_name] = lower
                if self.verbose:
                    print(f"[VALIDATOR] Corrected {param_name}: {value:.2f} -> {lower:.2f}")
            if value > upper:
                corrected[param_name] = upper
                if self.verbose:
                    print(f"[VALIDATOR] Corrected {param_name}: {value:.2f} -> {upper:.2f}")
        return corrected
def validate_before_simulation(
    design_variables: Dict[str, float],
    model_type: str = 'circular_plate',
    strict: bool = False
) -> Tuple[bool, List[str]]:
    """
    Convenience function for quick validation.

    Args:
        design_variables: Parameters to validate
        model_type: Type of FEA model
        strict: Reject on warnings (not just errors)

    Returns:
        (is_valid, warnings)
    """
    # One-shot validator; console output suppressed for library use.
    checker = SimulationValidator(model_type=model_type, verbose=False)
    return checker.validate(design_variables, strict=strict)