refactor: Major reorganization of optimization_engine module structure
BREAKING CHANGE: Module paths have been reorganized for better maintainability. Backwards compatibility aliases with deprecation warnings are provided. New Structure: - core/ - Optimization runners (runner, intelligent_optimizer, etc.) - processors/ - Data processing - surrogates/ - Neural network surrogates - nx/ - NX/Nastran integration (solver, updater, session_manager) - study/ - Study management (creator, wizard, state, reset) - reporting/ - Reports and analysis (visualizer, report_generator) - config/ - Configuration management (manager, builder) - utils/ - Utilities (logger, auto_doc, etc.) - future/ - Research/experimental code Migration: - ~200 import changes across 125 files - All __init__.py files use lazy loading to avoid circular imports - Backwards compatibility layer supports old import paths with warnings - All existing functionality preserved To migrate existing code: OLD: from optimization_engine.nx_solver import NXSolver NEW: from optimization_engine.nx.solver import NXSolver OLD: from optimization_engine.runner import OptimizationRunner NEW: from optimization_engine.core.runner import OptimizationRunner 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
393
optimization_engine/reporting/results_analyzer.py
Normal file
393
optimization_engine/reporting/results_analyzer.py
Normal file
@@ -0,0 +1,393 @@
|
||||
"""
|
||||
Comprehensive Results Analyzer
|
||||
|
||||
Performs thorough introspection of OP2, F06, and other Nastran output files
|
||||
to discover ALL available results, not just what we expect.
|
||||
|
||||
This helps ensure we don't miss important data that's actually in the output files.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
import json
|
||||
from dataclasses import dataclass, asdict
|
||||
from pyNastran.op2.op2 import OP2
|
||||
|
||||
|
||||
@dataclass
class OP2Contents:
    """Complete inventory of OP2 file contents.

    Produced by ComprehensiveResultsAnalyzer.analyze_op2. Every field reflects
    what was actually discovered in the binary OP2 file; subcase lists hold the
    subcase IDs for which each result type is present.
    """
    # Path of the analyzed .op2 file (stored as str so asdict() is JSON-serializable).
    file_path: str
    # Sorted union of every subcase ID seen across all result types.
    subcases: List[int]

    # Displacement results
    displacement_available: bool
    displacement_subcases: List[int]

    # Stress results (by element type)
    stress_results: Dict[str, List[int]]  # element_type -> [subcases]

    # Strain results (by element type)
    strain_results: Dict[str, List[int]]

    # Force results
    force_results: Dict[str, List[int]]

    # Other results (attr name -> str(type(obj)) for eigenvalues, load vectors, etc.)
    other_results: Dict[str, Any]

    # Grid point forces/stresses (each list holds the subcase IDs with data)
    grid_point_forces: List[int]
    spc_forces: List[int]
    mpc_forces: List[int]

    # Summary
    # Count of distinct result categories found (see analyze_op2 for the tally).
    total_result_types: int
    # Sorted element types that have stress, strain, or force results.
    element_types_with_results: List[str]
|
||||
|
||||
|
||||
@dataclass
class F06Contents:
    """Complete inventory of F06 file contents.

    Produced by ComprehensiveResultsAnalyzer.analyze_f06 via plain text scans
    of the F06 listing. When the file is missing, all flags are False and all
    lists are empty.
    """
    # Path of the analyzed .f06 file (stored as str for JSON serialization).
    file_path: str
    # Presence flags for the spaced-capital Nastran section headers
    # (e.g. 'D I S P L A C E M E N T').
    has_displacement: bool
    has_stress: bool
    has_strain: bool
    has_forces: bool
    # Element-type keywords (CQUAD4, CTRIA3, ...) found anywhere in the listing.
    element_types_found: List[str]
    # Lines containing ERROR/FATAL, truncated to the first 20 by analyze_f06.
    error_messages: List[str]
    # Lines containing WARNING/WARN, truncated to the first 20 by analyze_f06.
    warning_messages: List[str]
|
||||
|
||||
|
||||
class ComprehensiveResultsAnalyzer:
    """
    Analyzes ALL Nastran output files to discover available results.

    This is much more thorough than just checking expected results: it
    inventories every displacement, stress, strain, force, and constraint-force
    table actually present in the OP2, and cross-checks the F06 text listing
    for result sections, errors, and warnings.
    """

    def __init__(self, output_dir: Path):
        """
        Initialize analyzer.

        Args:
            output_dir: Directory containing Nastran output files (.op2, .f06, etc.)
        """
        self.output_dir = Path(output_dir)

    @staticmethod
    def _collect_element_results(model: Any, attr_names: List[str],
                                 suffix: str, label: str) -> Dict[str, List[int]]:
        """
        Probe an OP2 model for per-element-type result dictionaries.

        pyNastran exposes each result table as an attribute named
        '<element>_<kind>' (e.g. 'cquad4_stress') mapping subcase ID -> data.
        This helper replaces three copy-pasted scan loops (stress/strain/force).

        Args:
            model: Loaded pyNastran OP2 model.
            attr_names: Candidate attribute names to probe.
            suffix: Suffix stripped from the attribute name to recover the
                element type (e.g. '_stress').
            label: Human-readable result kind used in console output.

        Returns:
            Mapping of element type -> list of subcase IDs that have results.
        """
        found: Dict[str, List[int]] = {}
        for attr_name in attr_names:
            if not hasattr(model, attr_name):
                continue
            try:
                result_obj = getattr(model, attr_name)
                # Only a non-empty subcase dict counts as a real result.
                if isinstance(result_obj, dict) and len(result_obj) > 0:
                    element_type = attr_name.replace(suffix, '')
                    subcases = list(result_obj.keys())
                    found[element_type] = subcases
                    print(f"  {label} [{element_type}]: YES")
                    print(f"    Subcases: {subcases}")
            except Exception:
                # Some pyNastran attributes raise on access; skip those.
                pass

        if not found:
            print(f"  {label}: NO {label.lower()} results found")
        return found

    def analyze_op2(self, op2_file: Path) -> OP2Contents:
        """
        Comprehensively analyze OP2 file contents.

        Args:
            op2_file: Path to OP2 file

        Returns:
            OP2Contents with complete inventory

        Raises:
            Propagates whatever pyNastran raises if the file is missing or
            unreadable.
        """
        print(f"\n[OP2 ANALYSIS] Reading: {op2_file.name}")

        model = OP2()
        model.read_op2(str(op2_file))

        # Union of every subcase ID seen in any result table.
        all_subcases: set = set()

        # Check displacement
        displacement_available = hasattr(model, 'displacements') and len(model.displacements) > 0
        displacement_subcases = list(model.displacements.keys()) if displacement_available else []
        all_subcases.update(displacement_subcases)

        print(f"  Displacement: {'YES' if displacement_available else 'NO'}")
        if displacement_subcases:
            print(f"    Subcases: {displacement_subcases}")

        # List of known stress attribute names (safer than scanning all attributes)
        stress_attrs = [
            'cquad4_stress', 'ctria3_stress', 'ctetra_stress', 'chexa_stress', 'cpenta_stress',
            'cbar_stress', 'cbeam_stress', 'crod_stress', 'conrod_stress', 'ctube_stress',
            'cshear_stress', 'cbush_stress', 'cgap_stress', 'celas1_stress', 'celas2_stress',
            'celas3_stress', 'celas4_stress'
        ]

        # Stress/strain/force tables share the same naming scheme, so the
        # strain and force attribute lists are derived from the stress list.
        stress_results = self._collect_element_results(
            model, stress_attrs, '_stress', 'Stress')
        strain_results = self._collect_element_results(
            model, [a.replace('_stress', '_strain') for a in stress_attrs], '_strain', 'Strain')
        force_results = self._collect_element_results(
            model, [a.replace('_stress', '_force') for a in stress_attrs], '_force', 'Force')

        for per_type in (stress_results, strain_results, force_results):
            for subcases in per_type.values():
                all_subcases.update(subcases)

        # Check grid point forces
        grid_point_forces = list(model.grid_point_forces.keys()) if hasattr(model, 'grid_point_forces') else []
        if grid_point_forces:
            print(f"  Grid Point Forces: YES")
            print(f"    Subcases: {grid_point_forces}")
            all_subcases.update(grid_point_forces)

        # Check SPC/MPC forces
        spc_forces = list(model.spc_forces.keys()) if hasattr(model, 'spc_forces') else []
        mpc_forces = list(model.mpc_forces.keys()) if hasattr(model, 'mpc_forces') else []

        if spc_forces:
            print(f"  SPC Forces: YES")
            print(f"    Subcases: {spc_forces}")
            all_subcases.update(spc_forces)

        if mpc_forces:
            print(f"  MPC Forces: YES")
            print(f"    Subcases: {mpc_forces}")
            all_subcases.update(mpc_forces)

        # Check for other interesting results
        other_results = {}
        interesting_attrs = ['eigenvalues', 'eigenvectors', 'thermal_load_vectors',
                             'load_vectors', 'contact', 'glue', 'slide_lines']

        for attr_name in interesting_attrs:
            if hasattr(model, attr_name):
                obj = getattr(model, attr_name)
                # BUGFIX: the old condition
                #   `obj and (isinstance(obj, dict) and len(obj) > 0) or (not isinstance(obj, dict))`
                # parsed as `(obj and ...) or (not isinstance(obj, dict))` due to
                # and/or precedence, so a falsy non-dict (None, empty list) was
                # reported as present. Simple truthiness matches the intent:
                # non-empty dict, or any truthy non-dict object.
                if obj:
                    other_results[attr_name] = str(type(obj))
                    print(f"  {attr_name}: YES")

        # Collect all element types that have any results
        all_element_types = set()
        all_element_types.update(stress_results.keys())
        all_element_types.update(strain_results.keys())
        all_element_types.update(force_results.keys())

        total_result_types = (
            len(stress_results) +
            len(strain_results) +
            len(force_results) +
            (1 if displacement_available else 0) +
            (1 if grid_point_forces else 0) +
            (1 if spc_forces else 0) +
            (1 if mpc_forces else 0) +
            len(other_results)
        )

        print(f"\n  SUMMARY:")
        print(f"    Total subcases: {len(all_subcases)}")
        print(f"    Total result types: {total_result_types}")
        print(f"    Element types with results: {sorted(all_element_types)}")

        return OP2Contents(
            file_path=str(op2_file),
            subcases=sorted(all_subcases),
            displacement_available=displacement_available,
            displacement_subcases=displacement_subcases,
            stress_results=stress_results,
            strain_results=strain_results,
            force_results=force_results,
            other_results=other_results,
            grid_point_forces=grid_point_forces,
            spc_forces=spc_forces,
            mpc_forces=mpc_forces,
            total_result_types=total_result_types,
            element_types_with_results=sorted(all_element_types)
        )

    def analyze_f06(self, f06_file: Path) -> F06Contents:
        """
        Analyze F06 file for available results.

        Args:
            f06_file: Path to F06 file

        Returns:
            F06Contents with inventory; all flags False and all lists empty
            when the file does not exist.
        """
        print(f"\n[F06 ANALYSIS] Reading: {f06_file.name}")

        if not f06_file.exists():
            print(f"  F06 file not found")
            return F06Contents(
                file_path=str(f06_file),
                has_displacement=False,
                has_stress=False,
                has_strain=False,
                has_forces=False,
                element_types_found=[],
                error_messages=[],
                warning_messages=[]
            )

        # Read F06 file; latin-1 with errors='ignore' tolerates stray bytes.
        with open(f06_file, 'r', encoding='latin-1', errors='ignore') as f:
            content = f.read()

        # Nastran prints section headers with spaced-out capital letters.
        has_displacement = 'D I S P L A C E M E N T' in content
        has_stress = 'S T R E S S E S' in content
        has_strain = 'S T R A I N S' in content
        has_forces = 'F O R C E S' in content

        print(f"  Displacement: {'YES' if has_displacement else 'NO'}")
        print(f"  Stress: {'YES' if has_stress else 'NO'}")
        print(f"  Strain: {'YES' if has_strain else 'NO'}")
        print(f"  Forces: {'YES' if has_forces else 'NO'}")

        # Find element types mentioned anywhere in the listing.
        element_keywords = ['CQUAD4', 'CTRIA3', 'CTETRA', 'CHEXA', 'CPENTA', 'CBAR', 'CBEAM', 'CROD']
        element_types_found = [elem_type for elem_type in element_keywords
                               if elem_type in content]

        if element_types_found:
            print(f"  Element types: {element_types_found}")

        # Extract errors and warnings (case-insensitive substring scan).
        error_messages = []
        warning_messages = []

        for line in content.split('\n'):
            line_upper = line.upper()
            if 'ERROR' in line_upper or 'FATAL' in line_upper:
                error_messages.append(line.strip())
            elif 'WARN' in line_upper:
                # 'WARN' also matches 'WARNING' (substring), so a single test
                # replaces the old redundant 'WARNING' or 'WARN' check.
                warning_messages.append(line.strip())

        if error_messages:
            print(f"  Errors found: {len(error_messages)}")
            for err in error_messages[:5]:  # Show first 5
                print(f"    {err}")

        if warning_messages:
            print(f"  Warnings found: {len(warning_messages)}")
            for warn in warning_messages[:5]:  # Show first 5
                print(f"    {warn}")

        return F06Contents(
            file_path=str(f06_file),
            has_displacement=has_displacement,
            has_stress=has_stress,
            has_strain=has_strain,
            has_forces=has_forces,
            element_types_found=element_types_found,
            error_messages=error_messages[:20],  # Keep first 20
            warning_messages=warning_messages[:20]
        )

    def analyze_all(self, op2_pattern: str = "*.op2", f06_pattern: str = "*.f06") -> Dict[str, Any]:
        """
        Analyze all OP2 and F06 files in directory.

        Args:
            op2_pattern: Glob pattern for OP2 files
            f06_pattern: Glob pattern for F06 files

        Returns:
            Dict with complete analysis results; dataclass inventories are
            converted with asdict() so the dict is JSON-serializable.
        """
        print("=" * 80)
        print("COMPREHENSIVE NASTRAN RESULTS ANALYSIS")
        print("=" * 80)
        print(f"\nDirectory: {self.output_dir}")

        results: Dict[str, Any] = {
            'directory': str(self.output_dir),
            'op2_files': [],
            'f06_files': []
        }

        # Find and analyze all OP2 files
        op2_files = list(self.output_dir.glob(op2_pattern))
        print(f"\nFound {len(op2_files)} OP2 file(s)")

        for op2_file in op2_files:
            results['op2_files'].append(asdict(self.analyze_op2(op2_file)))

        # Find and analyze all F06 files
        f06_files = list(self.output_dir.glob(f06_pattern))
        print(f"\nFound {len(f06_files)} F06 file(s)")

        for f06_file in f06_files:
            results['f06_files'].append(asdict(self.analyze_f06(f06_file)))

        print("\n" + "=" * 80)
        print("ANALYSIS COMPLETE")
        print("=" * 80)

        return results
|
||||
|
||||
|
||||
if __name__ == '__main__':
    import sys

    # Target directory: first CLI argument if given, else the current directory.
    target_dir = Path(sys.argv[1]) if len(sys.argv) > 1 else Path.cwd()

    # Run the full inventory over every OP2/F06 file found there.
    analysis = ComprehensiveResultsAnalyzer(target_dir).analyze_all()

    # Persist the machine-readable report next to the output files.
    report_path = target_dir / "comprehensive_results_analysis.json"
    with open(report_path, 'w', encoding='utf-8') as fh:
        json.dump(analysis, fh, indent=2)

    print(f"\nResults saved to: {report_path}")
|
||||
Reference in New Issue
Block a user