refactor: Major reorganization of optimization_engine module structure
BREAKING CHANGE: Module paths have been reorganized for better maintainability. Backwards compatibility aliases with deprecation warnings are provided. New Structure: - core/ - Optimization runners (runner, intelligent_optimizer, etc.) - processors/ - Data processing - surrogates/ - Neural network surrogates - nx/ - NX/Nastran integration (solver, updater, session_manager) - study/ - Study management (creator, wizard, state, reset) - reporting/ - Reports and analysis (visualizer, report_generator) - config/ - Configuration management (manager, builder) - utils/ - Utilities (logger, auto_doc, etc.) - future/ - Research/experimental code Migration: - ~200 import changes across 125 files - All __init__.py files use lazy loading to avoid circular imports - Backwards compatibility layer supports old import paths with warnings - All existing functionality preserved To migrate existing code: OLD: from optimization_engine.nx_solver import NXSolver NEW: from optimization_engine.nx.solver import NXSolver OLD: from optimization_engine.runner import OptimizationRunner NEW: from optimization_engine.core.runner import OptimizationRunner 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
341
optimization_engine/utils/auto_doc.py
Normal file
341
optimization_engine/utils/auto_doc.py
Normal file
@@ -0,0 +1,341 @@
|
||||
"""
|
||||
Auto-Documentation Generator for Atomizer
|
||||
|
||||
This module automatically generates documentation from code, ensuring
|
||||
that skills and protocols stay in sync with the implementation.
|
||||
|
||||
Usage:
|
||||
python -m optimization_engine.auto_doc extractors
|
||||
python -m optimization_engine.auto_doc templates
|
||||
python -m optimization_engine.auto_doc all
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import importlib
|
||||
import json
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Any, Optional
|
||||
|
||||
|
||||
def get_extractor_info() -> List[Dict[str, Any]]:
    """Collect metadata for every extractor exported by the extractors package.

    Returns a list of dicts with name, module, category, phase, parameters,
    docstring and is_class for each callable listed in ``extractors.__all__``.
    """
    from optimization_engine import extractors

    # Ordered keyword -> category rules; the first matching rule wins,
    # mirroring the precedence of the previous if/elif chain.
    category_rules = [
        (('stress',), 'stress'),
        (('temperature', 'thermal', 'heat'), 'thermal'),
        (('modal', 'frequency'), 'modal'),
        (('zernike',), 'optical'),
        (('mass',), 'mass'),
        (('strain',), 'strain'),
        (('spc', 'reaction', 'force'), 'forces'),
    ]

    # Hard-coded rollout phases for known extractor names; everything else
    # is considered Phase 1.
    phase2_names = {
        'extract_principal_stress', 'extract_max_principal_stress',
        'extract_min_principal_stress', 'extract_strain_energy',
        'extract_total_strain_energy', 'extract_strain_energy_density',
        'extract_spc_forces', 'extract_total_reaction_force',
        'extract_reaction_component', 'check_force_equilibrium',
    }
    phase3_names = {
        'extract_temperature', 'extract_temperature_gradient',
        'extract_heat_flux', 'get_max_temperature',
        'extract_modal_mass', 'extract_frequencies',
        'get_first_frequency', 'get_modal_mass_ratio',
    }

    collected: List[Dict[str, Any]] = []

    for export_name in extractors.__all__:
        exported = getattr(extractors, export_name)
        if not callable(exported):
            continue

        # Introspect the call signature; some callables (builtins, C
        # extensions) refuse introspection.
        try:
            signature = inspect.signature(exported)
        except (ValueError, TypeError):
            parameters = []
        else:
            parameters = [
                {
                    'name': param.name,
                    'default': str(param.default) if param.default != inspect.Parameter.empty else None,
                    'annotation': str(param.annotation) if param.annotation != inspect.Parameter.empty else None,
                }
                for param in signature.parameters.values()
            ]

        lowered = export_name.lower()
        category = 'general'
        for keywords, label in category_rules:
            if any(word in lowered for word in keywords):
                category = label
                break

        if export_name in phase2_names:
            phase = 'Phase 2'
        elif export_name in phase3_names:
            phase = 'Phase 3'
        else:
            phase = 'Phase 1'

        collected.append({
            'name': export_name,
            'module': exported.__module__,
            'category': category,
            'phase': phase,
            'parameters': parameters,
            'docstring': inspect.getdoc(exported) or "No documentation available",
            'is_class': inspect.isclass(exported),
        })

    return collected
||||
|
||||
|
||||
def get_template_info() -> List[Dict[str, Any]]:
    """Load study template metadata from the templates registry.

    Returns:
        The ``templates`` list from ``registry.json``, or ``[]`` when no
        registry file can be found.
    """
    # This module moved from optimization_engine/ into optimization_engine/utils/,
    # so the templates directory now lives one level above this file.  Keep the
    # old sibling location as a fallback for layouts that still use it.
    candidates = [
        Path(__file__).parent.parent / 'templates' / 'registry.json',
        Path(__file__).parent / 'templates' / 'registry.json',
    ]

    for templates_file in candidates:
        if templates_file.exists():
            with open(templates_file) as f:
                data = json.load(f)
            return data.get('templates', [])

    return []
||||
|
||||
|
||||
def generate_extractor_markdown(extractors: List[Dict[str, Any]]) -> str:
    """Render the full markdown reference document for the given extractors."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M')
    out: List[str] = [
        "# Atomizer Extractor Library",
        "",
        f"*Auto-generated: {stamp}*",
        "",
        "This document is automatically generated from the extractor source code.",
        "",
        "---",
        "",
        "## Quick Reference",
        "",
        "| Extractor | Category | Phase | Description |",
        "|-----------|----------|-------|-------------|",
    ]

    # Summary table, ordered by (category, name); only the first docstring
    # line (truncated to 60 chars) goes into the table.
    for entry in sorted(extractors, key=lambda e: (e['category'], e['name'])):
        summary = entry['docstring'].split('\n')[0][:60]
        out.append(
            f"| `{entry['name']}` | {entry['category']} | {entry['phase']} | {summary} |"
        )

    out += ["", "---", ""]

    # Bucket extractors by category for the detailed sections.
    by_category: Dict[str, List[Dict[str, Any]]] = {}
    for entry in extractors:
        by_category.setdefault(entry['category'], []).append(entry)

    for category, members in sorted(by_category.items()):
        out += [f"## {category.title()} Extractors", ""]

        for entry in sorted(members, key=lambda e: e['name']):
            out += [
                f"### `{entry['name']}`",
                "",
                f"**Module**: `{entry['module']}`",
                f"**Phase**: {entry['phase']}",
                "",
            ]

            if entry['parameters']:
                out += ["**Parameters**:", ""]
                for param in entry['parameters']:
                    suffix = f" = `{param['default']}`" if param['default'] else ""
                    out.append(f"- `{param['name']}`{suffix}")
                out.append("")

            out += [
                "**Description**:",
                "",
                "```",
                entry['docstring'],
                "```",
                "",
                "---",
                "",
            ]

    return '\n'.join(out)
||||
|
||||
|
||||
def generate_template_markdown(templates: List[Dict[str, Any]]) -> str:
    """Render the markdown reference document for study templates."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M')
    out: List[str] = [
        "# Atomizer Study Templates",
        "",
        f"*Auto-generated: {stamp}*",
        "",
        "Available templates for quick study creation.",
        "",
        "---",
        "",
        "## Template Reference",
        "",
        "| Template | Objectives | Extractors |",
        "|----------|------------|------------|",
    ]

    for entry in templates:
        # Objectives may be plain strings or dicts carrying a 'name' key.
        objective_items = entry.get('objectives', [])
        if objective_items and isinstance(objective_items[0], dict):
            objective_text = ', '.join(o.get('name', str(o)) for o in objective_items)
        else:
            objective_text = ', '.join(objective_items)
        extractor_text = ', '.join(entry.get('extractors', []))
        out.append(f"| `{entry['name']}` | {objective_text} | {extractor_text} |")

    out += ["", "---", ""]

    for entry in templates:
        out += [
            f"## {entry['name']}",
            "",
            f"**Description**: {entry.get('description', 'N/A')}",
            "",
            f"**Category**: {entry.get('category', 'N/A')}",
            f"**Solver**: {entry.get('solver', 'N/A')}",
            f"**Sampler**: {entry.get('sampler', 'N/A')}",
            f"**Turbo Suitable**: {'Yes' if entry.get('turbo_suitable') else 'No'}",
            "",
            f"**Example Study**: `{entry.get('example_study', 'N/A')}`",
            "",
        ]

        if entry.get('objectives'):
            out.append("**Objectives**:")
            for objective in entry['objectives']:
                if isinstance(objective, dict):
                    out.append(
                        f"- {objective.get('name', '?')} ({objective.get('direction', '?')}) - Extractor: {objective.get('extractor', '?')}"
                    )
                else:
                    out.append(f"- {objective}")
            out.append("")

        if entry.get('extractors'):
            out.append("**Extractors Used**:")
            for extractor_name in entry['extractors']:
                out.append(f"- {extractor_name}")
            out.append("")

        if entry.get('recommended_trials'):
            out.append("**Recommended Trials**:")
            for trial_key, trial_value in entry['recommended_trials'].items():
                out.append(f"- {trial_key}: {trial_value}")
            out.append("")

        out += ["---", ""]

    return '\n'.join(out)
||||
|
||||
|
||||
def generate_cheatsheet_update(extractors: List[Dict[str, Any]]) -> str:
    """Build the extractor quick-reference table used in 01_CHEATSHEET.md."""
    # Human-friendly physics label for each category; unknown categories
    # fall back to the raw category string.
    physics_labels = {
        'stress': 'Von Mises stress',
        'thermal': 'Temperature',
        'modal': 'Natural frequency',
        'optical': 'Zernike WFE',
        'mass': 'Mass',
        'strain': 'Strain energy',
        'forces': 'Reaction forces',
        'general': 'Displacement',
    }

    rows: List[str] = [
        "## Extractor Quick Reference",
        "",
        "| Physics | Extractor | Function Call |",
        "|---------|-----------|---------------|",
    ]

    for entry in sorted(extractors, key=lambda e: e['category']):
        if entry['is_class']:
            # Classes are not shown in the quick-call table.
            continue
        label = physics_labels.get(entry['category'], entry['category'])
        # Show at most the first two parameters in the example call.
        leading_params = entry['parameters'][:2] if entry['parameters'] else []
        call_args = ', '.join(p['name'] for p in leading_params)
        rows.append(f"| {label} | {entry['name']} | `{entry['name']}({call_args})` |")

    return '\n'.join(rows)
||||
|
||||
|
||||
def update_atomizer_context(extractors: List[Dict[str, Any]], templates: List[Dict[str, Any]]):
    """Report extractor/template counts for ATOMIZER_CONTEXT.md maintenance.

    Currently this only verifies the context file exists and prints the
    counts; actually rewriting version info in the file is future work.
    """
    # This module moved from optimization_engine/ into optimization_engine/utils/,
    # so the project root (which holds .claude/) is one directory further up
    # than before.  Keep the old relative location as a fallback.
    candidates = [
        Path(__file__).parent.parent.parent / '.claude' / 'ATOMIZER_CONTEXT.md',
        Path(__file__).parent.parent / '.claude' / 'ATOMIZER_CONTEXT.md',
    ]
    context_file = next((p for p in candidates if p.exists()), None)

    if context_file is None:
        print(f"Warning: {candidates[0]} not found")
        return

    # Read now so an unreadable file fails loudly; the content is not yet
    # rewritten (see note below).
    content = context_file.read_text()

    extractor_count = len(extractors)
    template_count = len(templates)

    print(f"Found {extractor_count} extractors and {template_count} templates")

    # Could add logic here to update version info based on changes
||||
|
||||
|
||||
def main():
    """CLI entry point: regenerate docs for extractors and/or templates."""
    import sys

    valid_commands = ('extractors', 'templates', 'all')

    # Reject unknown commands up front; previously an unrecognized command
    # fell through all branches and printed "Done!" without doing anything.
    if len(sys.argv) < 2 or sys.argv[1] not in valid_commands:
        print("Usage: python -m optimization_engine.utils.auto_doc [extractors|templates|all]")
        sys.exit(1)

    command = sys.argv[1]

    # This module lives in optimization_engine/utils/, so the project root
    # (which holds docs/) is three levels up from this file.
    output_dir = Path(__file__).parent.parent.parent / 'docs' / 'generated'
    output_dir.mkdir(parents=True, exist_ok=True)

    if command in ('extractors', 'all'):
        print("Generating extractor documentation...")
        extractors = get_extractor_info()

        # Full reference document.
        doc_content = generate_extractor_markdown(extractors)
        (output_dir / 'EXTRACTORS.md').write_text(doc_content)
        print(f" Written: {output_dir / 'EXTRACTORS.md'}")

        # Quick-reference table for the cheatsheet.
        cheatsheet = generate_cheatsheet_update(extractors)
        (output_dir / 'EXTRACTOR_CHEATSHEET.md').write_text(cheatsheet)
        print(f" Written: {output_dir / 'EXTRACTOR_CHEATSHEET.md'}")

        print(f" Found {len(extractors)} extractors")

    if command in ('templates', 'all'):
        print("Generating template documentation...")
        templates = get_template_info()

        if templates:
            doc_content = generate_template_markdown(templates)
            (output_dir / 'TEMPLATES.md').write_text(doc_content)
            print(f" Written: {output_dir / 'TEMPLATES.md'}")
            print(f" Found {len(templates)} templates")
        else:
            print(" No templates found")

    if command == 'all':
        print("\nUpdating ATOMIZER_CONTEXT.md...")
        update_atomizer_context(get_extractor_info(), get_template_info())

    print("\nDone!")
||||
|
||||
|
||||
if __name__ == '__main__':
    # Allow running as a script: python -m optimization_engine.utils.auto_doc <command>
    main()
|
||||
415
optimization_engine/utils/codebase_analyzer.py
Normal file
415
optimization_engine/utils/codebase_analyzer.py
Normal file
@@ -0,0 +1,415 @@
|
||||
"""
|
||||
Codebase Capability Analyzer
|
||||
|
||||
Scans the Atomizer codebase to build a capability index showing what features
|
||||
are already implemented. This enables intelligent gap detection.
|
||||
|
||||
Author: Atomizer Development Team
|
||||
Version: 0.1.0 (Phase 2.5)
|
||||
Last Updated: 2025-01-16
|
||||
"""
|
||||
|
||||
import ast
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Set, Any, Optional
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
class CodeCapability:
    """Represents a discovered capability in the codebase."""
    name: str                # capability identifier, e.g. 'optuna_integration'
    category: str            # capability group, e.g. 'optimization', 'simulation'
    file_path: Path          # file where the capability was detected
    confidence: float        # heuristic detection confidence (range not enforced here)
    details: Dict[str, Any]  # extra detection metadata
||||
|
||||
|
||||
class CodebaseCapabilityAnalyzer:
    """Analyzes the Atomizer codebase to identify existing capabilities.

    Detection is heuristic: file-existence checks plus regex scans over the
    ``optimization_engine`` package.  Results are cached on
    ``self.capabilities`` as ``{category: {capability_name: bool}}``.
    """

    def __init__(self, project_root: Optional[Path] = None):
        """
        Args:
            project_root: Directory containing the ``optimization_engine``
                package.  When omitted, walk upward from this file until a
                directory containing ``optimization_engine`` is found.

        Raises:
            FileNotFoundError: If no project root could be auto-detected.
                (Previously a failed detection left ``project_root`` as
                ``None`` and every later method crashed with
                ``AttributeError: 'NoneType' object has no attribute ...``.)
        """
        if project_root is None:
            # Auto-detect project root by walking up the directory tree.
            current = Path(__file__).resolve()
            while current.parent != current:
                if (current / 'optimization_engine').exists():
                    project_root = current
                    break
                current = current.parent
            if project_root is None:
                raise FileNotFoundError(
                    "Could not auto-detect project root: no ancestor of "
                    f"{Path(__file__).resolve()} contains 'optimization_engine'"
                )

        self.project_root = project_root
        # category -> {capability_name: bool}; populated by analyze_codebase().
        self.capabilities: Dict[str, Dict[str, Any]] = {}

    def analyze_codebase(self) -> Dict[str, Any]:
        """
        Analyze the entire codebase and build the capability index.

        Returns:
            Nested dict of ``{category: {capability_name: bool}}``, e.g.::

                {
                    'optimization': {'optuna_integration': True, ...},
                    'simulation': {'nx_solver': True, 'sol101': True, ...},
                    'result_extraction': {'displacement': True, ...},
                    'geometry': {'parameter_extraction': True, ...},
                    'materials': {'xml_generation': True, ...},
                    'loads_bc': {}, 'mesh': {}, 'reporting': {}
                }

            The 'loads_bc', 'mesh' and 'reporting' categories are declared
            but not yet analyzed, so they stay empty.
        """
        capabilities = {
            'optimization': {},
            'simulation': {},
            'result_extraction': {},
            'geometry': {},
            'materials': {},
            'loads_bc': {},
            'mesh': {},
            'reporting': {}
        }

        capabilities['optimization'] = self._analyze_optimization()
        capabilities['simulation'] = self._analyze_simulation()
        capabilities['result_extraction'] = self._analyze_result_extraction()
        capabilities['geometry'] = self._analyze_geometry()
        capabilities['materials'] = self._analyze_materials()

        self.capabilities = capabilities
        return capabilities

    def _analyze_optimization(self) -> Dict[str, bool]:
        """Analyze optimization-related capabilities via filename and regex scans."""
        capabilities = {
            'optuna_integration': False,
            'parameter_updating': False,
            'expression_parsing': False,
            'history_tracking': False
        }

        # Optuna integration: either a *optuna* module at the package top
        # level, or an optuna import anywhere in the package.
        optuna_files = list(self.project_root.glob('optimization_engine/*optuna*.py'))
        if optuna_files or self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'import\s+optuna|from\s+optuna'
        ):
            capabilities['optuna_integration'] = True

        # Parameter updating: an update_parameter function or *ParameterUpdater class.
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'def\s+update_parameter|class\s+\w*Parameter\w*Updater'
        ):
            capabilities['parameter_updating'] = True

        # Expression parsing helpers.
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'def\s+parse_expression|def\s+extract.*expression'
        ):
            capabilities['expression_parsing'] = True

        # Optimization history tracking.
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'class\s+\w*History|def\s+track_history'
        ):
            capabilities['history_tracking'] = True

        return capabilities

    def _analyze_simulation(self) -> Dict[str, bool]:
        """Analyze simulation-related capabilities."""
        capabilities = {
            'nx_solver': False,
            'sol101': False,
            'sol103': False,
            'sol106': False,
            'journal_execution': False
        }

        # The NX solver moved from optimization_engine/nx_solver.py to
        # optimization_engine/nx/solver.py in the module reorganization;
        # check the new location first and fall back to the legacy path.
        engine_dir = self.project_root / 'optimization_engine'
        nx_solver_candidates = [
            engine_dir / 'nx' / 'solver.py',
            engine_dir / 'nx_solver.py',
        ]
        nx_solver_file = next((p for p in nx_solver_candidates if p.exists()), None)
        if nx_solver_file is not None:
            capabilities['nx_solver'] = True
            content = nx_solver_file.read_text(encoding='utf-8')

            # Detect which NX Nastran solution types the solver mentions.
            if 'sol101' in content.lower() or 'SOL101' in content:
                capabilities['sol101'] = True
            if 'sol103' in content.lower() or 'SOL103' in content:
                capabilities['sol103'] = True
            if 'sol106' in content.lower() or 'SOL106' in content:
                capabilities['sol106'] = True

        # NX journal execution helpers.
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'def\s+run.*journal|def\s+execute.*journal'
        ):
            capabilities['journal_execution'] = True

        return capabilities

    def _analyze_result_extraction(self) -> Dict[str, bool]:
        """Analyze result extraction capabilities by scanning result_extractors/."""
        capabilities = {
            'displacement': False,
            'stress': False,
            'strain': False,
            'modal': False,
            'temperature': False
        }

        extractors_dir = self.project_root / 'optimization_engine' / 'result_extractors'
        if extractors_dir.exists():
            for py_file in extractors_dir.glob('*.py'):
                content = py_file.read_text(encoding='utf-8')

                if re.search(r'displacement|displacements', content, re.IGNORECASE):
                    capabilities['displacement'] = True

                if re.search(r'stress|von_mises', content, re.IGNORECASE):
                    capabilities['stress'] = True

                if re.search(r'strain|strains', content, re.IGNORECASE):
                    # Require an actual extraction function/assignment, not
                    # just the word 'strain' appearing in a comment.
                    if re.search(r'def\s+\w*extract.*strain|strain.*=.*op2', content, re.IGNORECASE):
                        capabilities['strain'] = True

                if re.search(r'modal|mode_shape|eigenvalue', content, re.IGNORECASE):
                    capabilities['modal'] = True

                if re.search(r'temperature|thermal', content, re.IGNORECASE):
                    capabilities['temperature'] = True

        return capabilities

    def _analyze_geometry(self) -> Dict[str, bool]:
        """Analyze geometry-related capabilities."""
        capabilities = {
            'parameter_extraction': False,
            'expression_filtering': False,
            'feature_creation': False
        }

        # Parameter extraction (including expression reading/finding helpers).
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'def\s+extract.*parameter|def\s+get.*parameter|def\s+find.*expression|def\s+read.*expression|def\s+get.*expression'
        ):
            capabilities['parameter_extraction'] = True

        # Expression filtering by the 'v_' design-variable prefix convention.
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'v_|filter.*expression|contains.*v_'
        ):
            capabilities['expression_filtering'] = True

        # Geometry feature creation helpers.
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'def\s+create.*feature|def\s+add.*feature'
        ):
            capabilities['feature_creation'] = True

        return capabilities

    def _analyze_materials(self) -> Dict[str, bool]:
        """Analyze material-related capabilities."""
        capabilities = {
            'xml_generation': False,
            'material_assignment': False
        }

        # Material XML generation lives in custom_functions/*material*.py.
        material_files = list(self.project_root.glob('optimization_engine/custom_functions/*material*.py'))
        if material_files:
            capabilities['xml_generation'] = True

        # Material assignment helpers anywhere in the package.
        if self._file_contains_pattern(
            self.project_root / 'optimization_engine',
            r'def\s+assign.*material|def\s+set.*material'
        ):
            capabilities['material_assignment'] = True

        return capabilities

    def _file_contains_pattern(self, directory: Path, pattern: str) -> bool:
        """Return True if any Python file under *directory* matches *pattern*.

        Unreadable files are skipped; a missing directory yields False.
        """
        if not directory.exists():
            return False

        for py_file in directory.rglob('*.py'):
            try:
                content = py_file.read_text(encoding='utf-8')
                if re.search(pattern, content):
                    return True
            except Exception:
                # Best-effort scan: ignore files that cannot be read/decoded.
                continue

        return False

    def get_capability_details(self, category: str, capability: str) -> Optional[Dict[str, Any]]:
        """Get detailed information about a specific detected capability.

        Returns:
            Details dict with candidate implementation files, or None when
            the capability is unknown or was not detected.
        """
        if category not in self.capabilities:
            return None

        if capability not in self.capabilities[category]:
            return None

        if not self.capabilities[category][capability]:
            return None

        details = {
            'exists': True,
            'category': category,
            'name': capability,
            'implementation_files': []
        }

        # Filename fragments likely to implement each category's capabilities.
        search_patterns = {
            'optimization': ['optuna', 'parameter', 'expression'],
            'simulation': ['nx_solver', 'journal'],
            'result_extraction': ['op2', 'extractor', 'result'],
            'geometry': ['parameter', 'expression', 'geometry'],
            'materials': ['material', 'xml']
        }

        if category in search_patterns:
            for pattern in search_patterns[category]:
                for py_file in (self.project_root / 'optimization_engine').rglob(f'*{pattern}*.py'):
                    if py_file.is_file():
                        details['implementation_files'].append(str(py_file.relative_to(self.project_root)))

        return details

    def find_similar_capabilities(self, missing_capability: str, category: str) -> List[str]:
        """Find existing capabilities similar to the missing one.

        For 'result_extraction' every detected capability counts as similar
        (they all share the same OP2 extraction pattern); otherwise
        similarity means overlapping words in the snake_case names.
        """
        if category not in self.capabilities:
            return []

        similar = []

        if category == 'result_extraction':
            for capability, exists in self.capabilities[category].items():
                if exists and capability != missing_capability:
                    similar.append(capability)
            return similar

        missing_words = set(missing_capability.lower().split('_'))

        for capability, exists in self.capabilities[category].items():
            if not exists:
                continue

            capability_words = set(capability.lower().split('_'))

            # Any shared word makes the capability a candidate pattern.
            if missing_words & capability_words:
                similar.append(capability)

        return similar

    def get_summary(self) -> str:
        """Return a human-readable summary of detected/missing capabilities.

        Runs the analysis lazily if it has not been performed yet.
        """
        if not self.capabilities:
            self.analyze_codebase()

        lines = ["Atomizer Codebase Capabilities Summary", "=" * 50, ""]

        for category, caps in self.capabilities.items():
            if not caps:
                continue

            existing = [name for name, exists in caps.items() if exists]
            missing = [name for name, exists in caps.items() if not exists]

            if existing:
                lines.append(f"{category.upper()}:")
                lines.append(f" Implemented ({len(existing)}):")
                for cap in existing:
                    lines.append(f" - {cap}")

            if missing:
                lines.append(f" Not Found ({len(missing)}):")
                for cap in missing:
                    lines.append(f" - {cap}")
                lines.append("")

        return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """Smoke-test the codebase analyzer from the command line."""
    analyzer = CodebaseCapabilityAnalyzer()

    print("Analyzing Atomizer codebase...")
    print("=" * 80)

    capabilities = analyzer.analyze_codebase()

    print("\nCapabilities Found:")
    print("-" * 80)
    print(analyzer.get_summary())

    # Show per-capability status for result extraction, plus up to two
    # implementation files for each detected capability.
    print("\nDetailed Check: Result Extraction")
    print("-" * 80)
    for name, found in capabilities['result_extraction'].items():
        print(f" {name:20s} : {'FOUND' if found else 'MISSING'}")

        if found:
            info = analyzer.get_capability_details('result_extraction', name)
            if info and info.get('implementation_files'):
                print(f" Files: {', '.join(info['implementation_files'][:2])}")

    # Demonstrate the similarity lookup for a (possibly missing) capability.
    print("\nSimilar to 'strain':")
    print("-" * 80)
    related = analyzer.find_similar_capabilities('strain', 'result_extraction')
    if related:
        for name in related:
            print(f" - {name} (could be used as pattern)")
    else:
        print(" No similar capabilities found")
|
||||
|
||||
if __name__ == '__main__':
    # Run the analyzer smoke test when executed directly.
    main()
|
||||
298
optimization_engine/utils/logger.py
Normal file
298
optimization_engine/utils/logger.py
Normal file
@@ -0,0 +1,298 @@
|
||||
"""
|
||||
Atomizer Structured Logging System - Phase 1.3
|
||||
|
||||
Provides consistent, production-ready logging across all optimization studies.
|
||||
|
||||
Usage:
|
||||
from optimization_engine.utils.logger import get_logger
|
||||
|
||||
logger = get_logger(__name__)
|
||||
logger.info("Starting optimization...")
|
||||
logger.error("Simulation failed", exc_info=True)
|
||||
|
||||
# Study-specific logger with automatic file logging
|
||||
logger = get_logger("drone_gimbal_arm", study_dir="studies/drone_gimbal_arm/2_results")
|
||||
logger.trial_start(trial_number=5, design_vars={"thickness": 2.5})
|
||||
logger.trial_complete(trial_number=5, objectives={"mass": 120, "freq": 155})
|
||||
|
||||
Features:
|
||||
- Automatic file logging to study_dir/optimization.log
|
||||
- Console output with color-coded levels (if supported)
|
||||
- Structured trial logging for dashboard integration
|
||||
- Log rotation (50MB max, 3 backups)
|
||||
- No external dependencies (stdlib only)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Optional, Dict, Any
|
||||
from logging.handlers import RotatingFileHandler
|
||||
|
||||
|
||||
# ANSI color codes for console output (Windows 10+ and Unix)
|
||||
class LogColors:
    """ANSI color codes for console output.

    Used by ColoredFormatter to colorize level names; RESET restores the
    terminal's default style after a colored span.
    """
    RESET = '\033[0m'
    BOLD = '\033[1m'

    # Levels (one color per standard logging level)
    DEBUG = '\033[36m'     # Cyan
    INFO = '\033[32m'      # Green
    WARNING = '\033[33m'   # Yellow
    ERROR = '\033[31m'     # Red
    CRITICAL = '\033[35m'  # Magenta

    # Custom (not tied to a logging level)
    TRIAL = '\033[94m'     # Bright Blue
    SUCCESS = '\033[92m'   # Bright Green
|
||||
|
||||
class ColoredFormatter(logging.Formatter):
    """Formatter that adds ANSI color to the level name in console output.

    Color is applied only when colors were requested AND the attached
    terminal appears to support ANSI escape codes.
    """

    # Map logging level -> ANSI color prefix.
    COLORS = {
        logging.DEBUG: LogColors.DEBUG,
        logging.INFO: LogColors.INFO,
        logging.WARNING: LogColors.WARNING,
        logging.ERROR: LogColors.ERROR,
        logging.CRITICAL: LogColors.CRITICAL,
    }

    def __init__(self, fmt: str, use_colors: bool = True):
        """
        Args:
            fmt: Format string passed through to logging.Formatter.
            use_colors: Request colored output; actual coloring also
                requires terminal support (see _supports_color).
        """
        super().__init__(fmt)
        self.use_colors = use_colors and self._supports_color()

    def _supports_color(self) -> bool:
        """Check if the terminal supports ANSI colors."""
        if sys.platform == 'win32':
            # Windows 10+ supports ANSI once virtual-terminal processing is
            # enabled on the console output handle (-11 = STD_OUTPUT_HANDLE).
            try:
                import ctypes
                kernel32 = ctypes.windll.kernel32
                kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
                return True
            except (AttributeError, OSError):
                # Was a bare `except:` — narrowed so SystemExit and
                # KeyboardInterrupt are no longer silently swallowed.
                return False
        # Unix-like systems: require an attached TTY.
        return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()

    def format(self, record: logging.LogRecord) -> str:
        """Format the record, wrapping the level name in color codes.

        NOTE(review): this mutates record.levelname in place, so other
        handlers formatting the same record afterwards see the colored
        name — confirm this is acceptable for multi-handler setups.
        """
        if self.use_colors:
            levelname = record.levelname
            color = self.COLORS.get(record.levelno, '')
            record.levelname = f"{color}{levelname}{LogColors.RESET}"

        return super().format(record)
|
||||
|
||||
|
||||
class AtomizerLogger(logging.Logger):
    """Extended logger with structured trial- and study-level methods."""

    def _log_mapping(self, mapping: Dict[str, Any]):
        """Log each key/value pair on an indented line; floats get 4 decimals."""
        for key, val in mapping.items():
            if isinstance(val, float):
                self.info(f" {key}: {val:.4f}")
            else:
                self.info(f" {key}: {val}")

    def trial_start(self, trial_number: int, design_vars: Dict[str, float]):
        """Log trial start with design variables."""
        rule = '=' * 60
        self.info(rule)
        self.info(f"Trial #{trial_number} START")
        self.info(rule)
        self.info("Design Variables:")
        self._log_mapping(design_vars)

    def trial_complete(self, trial_number: int, objectives: Dict[str, float],
                       constraints: Optional[Dict[str, float]] = None,
                       feasible: bool = True):
        """Log trial completion with objectives, constraints and feasibility."""
        self.info(f"\nTrial #{trial_number} COMPLETE")
        self.info("Objectives:")
        self._log_mapping(objectives)

        if constraints:
            self.info("Constraints:")
            self._log_mapping(constraints)

        self.info("[OK] Feasible" if feasible else "[WARNING] Infeasible")
        self.info('=' * 60 + '\n')

    def trial_failed(self, trial_number: int, error: str):
        """Log trial failure with the error text."""
        self.error(f"\nTrial #{trial_number} FAILED")
        self.error(f"Error: {error}")
        self.error('=' * 60 + '\n')

    def study_start(self, study_name: str, n_trials: int, sampler: str):
        """Log the study initialization banner."""
        banner = "=" * 80
        self.info(banner)
        self.info(f"OPTIMIZATION STUDY: {study_name}")
        self.info(banner)
        self.info(f"Trials: {n_trials}")
        self.info(f"Sampler: {sampler}")
        self.info(f"Started: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        self.info(banner)
        self.info("")

    def study_complete(self, study_name: str, n_trials: int, n_successful: int):
        """Log the study completion banner with success/failure counts."""
        banner = "=" * 80
        self.info("")
        self.info(banner)
        self.info(f"STUDY COMPLETE: {study_name}")
        self.info(banner)
        self.info(f"Total trials: {n_trials}")
        self.info(f"Successful: {n_successful}")
        self.info(f"Failed/Pruned: {n_trials - n_successful}")
        self.info(f"Completed: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        self.info(banner)
|
||||
|
||||
# Register custom logger class.
# From this point on, every logging.getLogger(...) call in the process
# returns an AtomizerLogger, so the trial_*/study_* helpers are available
# on all loggers (including the one built by get_logger below).
logging.setLoggerClass(AtomizerLogger)
|
||||
|
||||
|
||||
def get_logger(
    name: str,
    level: int = logging.INFO,
    study_dir: Optional[Path] = None,
    console: bool = True,
    file_logging: bool = True
) -> AtomizerLogger:
    """
    Get or create a logger instance.

    Handlers are attached only on first configuration; calling again with the
    same ``name`` returns the already-configured logger unchanged.

    Args:
        name: Logger name (typically __name__ or study name)
        level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        study_dir: If provided, creates log file at study_dir/optimization.log
        console: Enable console output (default: True)
        file_logging: Enable file logging (default: True, requires study_dir)

    Returns:
        AtomizerLogger instance

    Example:
        # Simple console logger
        logger = get_logger(__name__)
        logger.info("Starting optimization...")

        # Study logger that also writes to a rotating file
        logger = get_logger(
            "drone_gimbal_arm",
            study_dir=Path("studies/drone_gimbal_arm/2_results")
        )
        logger.study_start("drone_gimbal_arm", n_trials=30, sampler="NSGAIISampler")
    """
    logger = logging.getLogger(name)

    # First-time configuration only: existing handlers mean the logger was
    # already set up by a previous call.
    if not logger.handlers:
        logger.setLevel(level)

        # Console output with ANSI-colored level names.
        if console:
            stream = logging.StreamHandler(sys.stdout)
            stream.setLevel(level)
            stream.setFormatter(ColoredFormatter(
                fmt='[%(levelname)s] %(message)s',
                use_colors=True
            ))
            logger.addHandler(stream)

        # Rotating file output (only when a study directory is supplied).
        if file_logging and study_dir:
            target_dir = Path(study_dir)
            target_dir.mkdir(parents=True, exist_ok=True)
            log_path = target_dir / "optimization.log"

            rotating = RotatingFileHandler(
                log_path,
                maxBytes=50 * 1024 * 1024,  # rotate at 50MB
                backupCount=3,
                encoding='utf-8'
            )
            rotating.setLevel(level)
            rotating.setFormatter(logging.Formatter(
                fmt='%(asctime)s | %(levelname)-8s | %(name)s | %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S'
            ))
            logger.addHandler(rotating)

            # Record where file logging landed (debug level only).
            logger.debug(f"File logging enabled: {log_path}")

    # Keep records out of the root logger so they are not duplicated.
    logger.propagate = False

    return logger
|
||||
|
||||
|
||||
def configure_root_logger(level: int = logging.WARNING):
    """
    Configure the root logger as a catch-all for unconfigured loggers.

    Call once at application startup to establish default logging behavior;
    a stdout handler is attached only if the root logger has none yet.
    """
    root = logging.getLogger()
    root.setLevel(level)

    if root.handlers:
        # Something else already set up root handlers; only the level changes.
        return

    fallback = logging.StreamHandler(sys.stdout)
    fallback.setLevel(level)
    fallback.setFormatter(logging.Formatter('[%(levelname)s] %(name)s: %(message)s'))
    root.addHandler(fallback)
|
||||
|
||||
|
||||
# Example usage and manual smoke test.
# NOTE: running this module directly creates ./test_logs/ and writes
# test_logs/optimization.log as a side effect.
if __name__ == "__main__":
    # Test basic logging (console only, default INFO level — the debug
    # message below is intentionally filtered out).
    print("Testing Atomizer Logging System")
    print("=" * 80)

    # Simple logger
    logger = get_logger("test_module")
    logger.debug("This is a debug message")
    logger.info("This is an info message")
    logger.warning("This is a warning message")
    logger.error("This is an error message")

    print()

    # Study logger with file output (exercises the rotating file handler)
    test_dir = Path("test_logs")
    test_dir.mkdir(exist_ok=True)

    study_logger = get_logger("test_study", study_dir=test_dir)
    study_logger.study_start("test_study", n_trials=5, sampler="TPESampler")

    # Simulate a successful trial with objectives and constraints
    study_logger.trial_start(1, {"thickness": 2.5, "width": 10.0})
    study_logger.info("Running simulation...")
    study_logger.trial_complete(
        1,
        objectives={"mass": 120.5, "stiffness": 1500.2},
        constraints={"max_stress": 85.3},
        feasible=True
    )

    # Failed trial
    study_logger.trial_start(2, {"thickness": 1.0, "width": 5.0})
    study_logger.trial_failed(2, "Simulation convergence failure")

    study_logger.study_complete("test_study", n_trials=5, n_successful=4)

    print()
    print(f"Log file created at: {test_dir / 'optimization.log'}")
    print("Check the file to see structured logging output!")
|
||||
329
optimization_engine/utils/pruning_logger.py
Normal file
329
optimization_engine/utils/pruning_logger.py
Normal file
@@ -0,0 +1,329 @@
|
||||
"""
|
||||
Pruning Logger - Comprehensive tracking of failed trials during optimization.
|
||||
|
||||
This module provides detailed logging of why trials are pruned, including:
|
||||
- Validation failures
|
||||
- Simulation failures
|
||||
- OP2 extraction failures
|
||||
- Parameter values at failure
|
||||
- Error messages and stack traces
|
||||
|
||||
Usage:
|
||||
logger = PruningLogger(results_dir=Path("studies/my_study/2_results"))
|
||||
|
||||
# Log different types of failures
|
||||
logger.log_validation_failure(trial_number, params, reasons)
|
||||
logger.log_simulation_failure(trial_number, params, error_msg)
|
||||
logger.log_op2_extraction_failure(trial_number, params, exception, op2_file)
|
||||
|
||||
# Generate summary report
|
||||
logger.save_summary()
|
||||
"""
|
||||
|
||||
import json
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Optional
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class PruningLogger:
    """Comprehensive logger for tracking pruned trials during optimization.

    Every failure event is appended to ``pruning_history.json`` immediately
    (so the history survives crashes), and :meth:`save_summary` aggregates
    failure patterns into ``pruning_summary.json``.
    """

    # Maps an event's 'pruning_cause' field to its counter key in self.stats.
    _CAUSE_TO_STAT = {
        'validation_failure': 'validation_failures',
        'simulation_failure': 'simulation_failures',
        'op2_extraction_failure': 'op2_extraction_failures',
    }

    def __init__(self, results_dir: Path, verbose: bool = True):
        """
        Initialize pruning logger.

        Args:
            results_dir: Directory to save pruning logs (typically 2_results/)
            verbose: Print pruning events to console
        """
        self.results_dir = Path(results_dir)
        self.results_dir.mkdir(parents=True, exist_ok=True)

        self.verbose = verbose

        # Log file paths
        self.pruning_log_file = self.results_dir / "pruning_history.json"
        self.pruning_summary_file = self.results_dir / "pruning_summary.json"

        # In-memory log, resumed from disk if a previous run left one
        self.pruning_events = []
        if self.pruning_log_file.exists():
            with open(self.pruning_log_file, 'r', encoding='utf-8') as f:
                self.pruning_events = json.load(f)

        # Statistics.
        # BUGFIX: previously the per-cause counters always started at zero
        # even when events were resumed from disk, so a resumed study
        # reported breakdown counts inconsistent with total_pruned. The
        # counters are now recomputed from the loaded events.
        self.stats = {
            'validation_failures': 0,
            'simulation_failures': 0,
            'op2_extraction_failures': 0,
            'total_pruned': len(self.pruning_events)
        }
        for event in self.pruning_events:
            key = self._CAUSE_TO_STAT.get(event.get('pruning_cause'))
            if key:
                self.stats[key] += 1

    def log_validation_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        validation_warnings: List[str]
    ):
        """
        Log a trial that was pruned due to validation failure.

        Args:
            trial_number: Trial number
            design_variables: Parameter values that failed validation
            validation_warnings: List of validation error messages
        """
        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'validation_failure',
            'design_variables': design_variables,
            'validation_warnings': validation_warnings,
            'details': {
                'validator_rejected': True,
                'warning_count': len(validation_warnings)
            }
        }

        self._add_event(event)
        self.stats['validation_failures'] += 1

        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - Validation Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Reasons: {len(validation_warnings)} validation errors")
            for warning in validation_warnings:
                print(f"    - {warning}")

    def log_simulation_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        error_message: str,
        return_code: Optional[int] = None,
        solver_errors: Optional[List[str]] = None
    ):
        """
        Log a trial that was pruned due to simulation failure.

        Args:
            trial_number: Trial number
            design_variables: Parameter values
            error_message: Main error message
            return_code: Solver return code (if available)
            solver_errors: List of solver error messages from F06
        """
        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'simulation_failure',
            'design_variables': design_variables,
            'error_message': error_message,
            'details': {
                'return_code': return_code,
                'solver_errors': solver_errors if solver_errors else []
            }
        }

        self._add_event(event)
        self.stats['simulation_failures'] += 1

        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - Simulation Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Error: {error_message}")
            if return_code is not None:
                print(f"  Return code: {return_code}")
            if solver_errors:
                print(f"  Solver errors:")
                for err in solver_errors[:3]:  # Show first 3
                    print(f"    - {err}")

    def log_op2_extraction_failure(
        self,
        trial_number: int,
        design_variables: Dict[str, float],
        exception: Exception,
        op2_file: Optional[Path] = None,
        f06_file: Optional[Path] = None
    ):
        """
        Log a trial that was pruned due to OP2 extraction failure.

        NOTE: relies on traceback.format_exc(), so it should be called from
        inside the ``except`` block that caught ``exception``; otherwise the
        recorded stack trace is empty.

        Args:
            trial_number: Trial number
            design_variables: Parameter values
            exception: The exception that was raised
            op2_file: Path to OP2 file (if exists)
            f06_file: Path to F06 file (for reference)
        """
        # Get full stack trace of the currently handled exception
        tb = traceback.format_exc()

        # Heuristic: a pyNastran op2_reader "FATAL" can be a false positive
        # (header flag set even though the run succeeded)
        is_fatal_error = 'FATAL' in str(exception) and 'op2_reader' in tb

        # Cross-check F06 for actual solver errors if provided
        f06_has_fatal = False
        f06_errors = []
        if f06_file and f06_file.exists():
            try:
                with open(f06_file, 'r', encoding='latin-1', errors='ignore') as f:
                    f06_content = f.read()
                f06_has_fatal = 'FATAL' in f06_content
                # Extract fatal/error lines for the event record
                for line in f06_content.split('\n'):
                    if 'FATAL' in line.upper() or 'ERROR' in line.upper():
                        f06_errors.append(line.strip())
            except Exception:
                # Best-effort cross-check only; never fail the logging itself
                pass

        event = {
            'trial_number': trial_number,
            'timestamp': datetime.now().isoformat(),
            'pruning_cause': 'op2_extraction_failure',
            'design_variables': design_variables,
            'exception_type': type(exception).__name__,
            'exception_message': str(exception),
            'stack_trace': tb,
            'details': {
                'op2_file': str(op2_file) if op2_file else None,
                'op2_exists': op2_file.exists() if op2_file else False,
                'op2_size_bytes': op2_file.stat().st_size if (op2_file and op2_file.exists()) else 0,
                'f06_file': str(f06_file) if f06_file else None,
                'is_pynastran_fatal_flag': is_fatal_error,
                'f06_has_fatal_errors': f06_has_fatal,
                'f06_errors': f06_errors[:5]  # First 5 errors
            }
        }

        self._add_event(event)
        self.stats['op2_extraction_failures'] += 1

        if self.verbose:
            print(f"\n[PRUNING LOG] Trial #{trial_number} - OP2 Extraction Failure")
            print(f"  Parameters: {self._format_params(design_variables)}")
            print(f"  Exception: {type(exception).__name__}: {str(exception)}")
            if is_fatal_error and not f06_has_fatal:
                print(f"  WARNING: pyNastran detected FATAL flag in OP2 header")
                print(f"           BUT F06 file has NO FATAL errors!")
                print(f"           This is likely a false positive - simulation may have succeeded")
            if op2_file:
                print(f"  OP2 file: {op2_file.name} ({'exists' if op2_file.exists() else 'missing'})")
                if op2_file.exists():
                    print(f"  OP2 size: {op2_file.stat().st_size:,} bytes")

    def _add_event(self, event: Dict[str, Any]):
        """Add event to the in-memory log and persist it immediately."""
        self.pruning_events.append(event)
        self.stats['total_pruned'] = len(self.pruning_events)

        # Save incrementally so the history survives a crash mid-study
        self._save_log()

    def _save_log(self):
        """Save full pruning history to disk (overwrites previous file)."""
        with open(self.pruning_log_file, 'w', encoding='utf-8') as f:
            json.dump(self.pruning_events, f, indent=2)

    def save_summary(self) -> Dict[str, Any]:
        """
        Generate and save pruning summary report.

        Aggregates failure reasons across all recorded events and writes the
        result to ``pruning_summary.json``.

        Returns:
            Summary dictionary
        """
        # Count recurring failure patterns
        validation_reasons = {}
        simulation_errors = {}
        op2_false_positives = 0

        for event in self.pruning_events:
            if event['pruning_cause'] == 'validation_failure':
                for warning in event['validation_warnings']:
                    validation_reasons[warning] = validation_reasons.get(warning, 0) + 1

            elif event['pruning_cause'] == 'simulation_failure':
                error = event['error_message']
                simulation_errors[error] = simulation_errors.get(error, 0) + 1

            elif event['pruning_cause'] == 'op2_extraction_failure':
                # FATAL flag in OP2 header but clean F06 => likely false positive
                if event['details'].get('is_pynastran_fatal_flag') and not event['details'].get('f06_has_fatal_errors'):
                    op2_false_positives += 1

        summary = {
            'generated': datetime.now().isoformat(),
            'total_pruned_trials': self.stats['total_pruned'],
            'breakdown': {
                'validation_failures': self.stats['validation_failures'],
                'simulation_failures': self.stats['simulation_failures'],
                'op2_extraction_failures': self.stats['op2_extraction_failures']
            },
            'validation_failure_reasons': validation_reasons,
            'simulation_failure_types': simulation_errors,
            'op2_extraction_analysis': {
                'total_op2_failures': self.stats['op2_extraction_failures'],
                'likely_false_positives': op2_false_positives,
                'description': 'False positives are OP2 extraction failures where pyNastran detected FATAL flag but F06 has no errors'
            },
            'recommendations': self._generate_recommendations(op2_false_positives)
        }

        # Save summary
        with open(self.pruning_summary_file, 'w', encoding='utf-8') as f:
            json.dump(summary, f, indent=2)

        if self.verbose:
            print(f"\n[PRUNING SUMMARY] Saved to {self.pruning_summary_file}")
            print(f"  Total pruned: {summary['total_pruned_trials']}")
            print(f"  Validation failures: {summary['breakdown']['validation_failures']}")
            print(f"  Simulation failures: {summary['breakdown']['simulation_failures']}")
            print(f"  OP2 extraction failures: {summary['breakdown']['op2_extraction_failures']}")
            if op2_false_positives > 0:
                print(f"\n  WARNING: {op2_false_positives} likely FALSE POSITIVES detected!")
                print(f"  These are pyNastran OP2 reader issues, not real failures")

        return summary

    def _generate_recommendations(self, op2_false_positives: int) -> List[str]:
        """Generate actionable recommendations based on pruning patterns."""
        recommendations = []

        if op2_false_positives > 0:
            recommendations.append(
                f"CRITICAL: {op2_false_positives} trials failed due to pyNastran OP2 reader being overly strict. "
                f"Use robust_extract_first_frequency() to ignore benign FATAL flags and extract valid results."
            )

        if self.stats['validation_failures'] == 0 and self.stats['simulation_failures'] > 0:
            recommendations.append(
                "Consider adding validation rules to catch simulation failures earlier "
                "(saves ~30 seconds per invalid trial)."
            )

        if self.stats['total_pruned'] == 0:
            recommendations.append("Excellent! No pruning detected - all trials succeeded.")

        return recommendations

    def _format_params(self, params: Dict[str, float]) -> str:
        """Format parameters for one-line console display.

        Numeric values are rendered with two decimals; anything that cannot
        take the float format spec (robustness fix: the original raised on
        non-numeric values) falls back to str().
        """
        def _fmt(v):
            try:
                return f"{v:.2f}"
            except (TypeError, ValueError):
                return str(v)
        return ", ".join(f"{k}={_fmt(v)}" for k, v in params.items())
|
||||
|
||||
|
||||
def create_pruning_logger(results_dir: Path, verbose: bool = True) -> PruningLogger:
    """
    Build a PruningLogger for a study's results directory.

    Args:
        results_dir: Results directory for the study
        verbose: Print pruning events to console

    Returns:
        PruningLogger instance
    """
    return PruningLogger(results_dir=results_dir, verbose=verbose)
|
||||
279
optimization_engine/utils/realtime_tracking.py
Normal file
279
optimization_engine/utils/realtime_tracking.py
Normal file
@@ -0,0 +1,279 @@
|
||||
"""
|
||||
Realtime Tracking System for Intelligent Optimizer
|
||||
|
||||
This module provides per-trial callbacks that write JSON tracking files
|
||||
immediately after each trial completes. This enables real-time dashboard
|
||||
updates and optimizer state visibility.
|
||||
|
||||
Protocol 13: Real-Time Tracking
|
||||
- Write JSON files AFTER EVERY SINGLE TRIAL
|
||||
- Use atomic writes (temp file + rename)
|
||||
- No batching allowed
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional
|
||||
import optuna
|
||||
|
||||
|
||||
class RealtimeTrackingCallback:
    """
    Optuna callback that writes tracking files after each trial.

    Files Written (EVERY TRIAL):
    - optimizer_state.json: Current strategy, phase, confidence
    - strategy_history.json: Append-only log of all recommendations
    - trial_log.json: Append-only log of all trials with timestamps
    - landscape_snapshot.json: Latest landscape analysis (if available)
    - confidence_history.json: Confidence scores over time

    All writes go through _atomic_write (temp file + rename) so an external
    consumer polling the directory never observes a partially written file.
    """

    def __init__(
        self,
        tracking_dir: Path,
        optimizer_ref: Any,  # Reference to IntelligentOptimizer instance
        verbose: bool = True
    ):
        """
        Initialize realtime tracking callback.

        Args:
            tracking_dir: Directory to write JSON files (intelligent_optimizer/)
            optimizer_ref: Reference to parent IntelligentOptimizer for state access
            verbose: Print status messages
        """
        self.tracking_dir = Path(tracking_dir)
        self.tracking_dir.mkdir(parents=True, exist_ok=True)
        self.optimizer = optimizer_ref
        self.verbose = verbose

        # Initialize tracking files (idempotent: existing files are kept)
        self._initialize_files()

    def _initialize_files(self):
        """Create initial empty tracking files (only if absent, so resumed
        studies keep their accumulated history)."""
        # Strategy history (append-only)
        strategy_history_file = self.tracking_dir / "strategy_history.json"
        if not strategy_history_file.exists():
            self._atomic_write(strategy_history_file, [])

        # Trial log (append-only)
        trial_log_file = self.tracking_dir / "trial_log.json"
        if not trial_log_file.exists():
            self._atomic_write(trial_log_file, [])

        # Confidence history (append-only)
        confidence_file = self.tracking_dir / "confidence_history.json"
        if not confidence_file.exists():
            self._atomic_write(confidence_file, [])

    def __call__(self, study: optuna.Study, trial: optuna.trial.FrozenTrial):
        """
        Called after each trial completes.

        Any exception raised while writing tracking files is caught and
        reported as a warning so tracking can never abort the optimization.

        Args:
            study: Optuna study object
            trial: Completed trial
        """
        try:
            # Skip if trial didn't complete successfully (failed/pruned trials
            # are intentionally not tracked here)
            if trial.state != optuna.trial.TrialState.COMPLETE:
                return

            # Write all tracking files
            self._write_optimizer_state(study, trial)
            self._write_trial_log(study, trial)
            self._write_strategy_history(study, trial)
            self._write_landscape_snapshot(study, trial)
            self._write_confidence_history(study, trial)

            if self.verbose:
                print(f"[Realtime Tracking] Trial #{trial.number} logged to {self.tracking_dir}")

        except Exception as e:
            print(f"[Realtime Tracking] WARNING: Failed to write tracking files: {e}")

    def _write_optimizer_state(self, study: optuna.Study, trial: optuna.trial.FrozenTrial):
        """Write a full snapshot of current optimizer state (overwritten each trial)."""
        # [Protocol 11] For multi-objective, strategy is always NSGA-II
        is_multi_objective = len(study.directions) > 1

        if is_multi_objective:
            # Multi-objective studies use NSGA-II, skip adaptive characterization
            current_strategy = "NSGA-II"
            current_phase = "multi_objective_optimization"
        else:
            # Single-objective uses intelligent strategy selection; fall back
            # to 'unknown' if the optimizer hasn't set these attributes yet
            current_strategy = getattr(self.optimizer, 'current_strategy', 'unknown')
            current_phase = getattr(self.optimizer, 'current_phase', 'unknown')

        state = {
            "timestamp": datetime.now().isoformat(),
            "trial_number": trial.number,
            "total_trials": len(study.trials),
            "current_phase": current_phase,
            "current_strategy": current_strategy,
            "is_multi_objective": is_multi_objective,
            "study_directions": [str(d) for d in study.directions],
        }

        # Add latest strategy recommendation if available
        if hasattr(self.optimizer, 'strategy_selector') and hasattr(self.optimizer.strategy_selector, 'recommendation_history'):
            history = self.optimizer.strategy_selector.recommendation_history
            if history:
                # assumes history entries are dicts with strategy/confidence/
                # reasoning keys -- TODO confirm against StrategySelector
                latest = history[-1]
                state["latest_recommendation"] = {
                    "strategy": latest.get("strategy", "unknown"),
                    "confidence": latest.get("confidence", 0.0),
                    "reasoning": latest.get("reasoning", "")
                }

        self._atomic_write(self.tracking_dir / "optimizer_state.json", state)

    def _write_trial_log(self, study: optuna.Study, trial: optuna.trial.FrozenTrial):
        """Append trial to trial log.

        NOTE: the whole log is re-read and re-written on every trial, so cost
        grows with trial count; acceptable at typical study sizes.
        """
        trial_log_file = self.tracking_dir / "trial_log.json"

        # Read existing log
        if trial_log_file.exists():
            with open(trial_log_file, 'r') as f:
                log = json.load(f)
        else:
            log = []

        # [Protocol 11] Handle both single and multi-objective
        is_multi_objective = len(study.directions) > 1

        # Append new trial
        # NOTE(review): duration assumes datetime_start is set whenever
        # datetime_complete is -- holds for COMPLETE trials, but confirm
        trial_entry = {
            "trial_number": trial.number,
            "timestamp": datetime.now().isoformat(),
            "state": str(trial.state),
            "params": trial.params,
            "duration_seconds": (trial.datetime_complete - trial.datetime_start).total_seconds() if trial.datetime_complete else None,
            "user_attrs": dict(trial.user_attrs) if trial.user_attrs else {}
        }

        # Add objectives (Protocol 11 compliant): exactly one of value/values
        # is populated so consumers can distinguish single- vs multi-objective
        if is_multi_objective:
            trial_entry["values"] = trial.values if trial.values is not None else None
            trial_entry["value"] = None  # Not available
        else:
            trial_entry["value"] = trial.value if trial.value is not None else None
            trial_entry["values"] = None

        log.append(trial_entry)
        self._atomic_write(trial_log_file, log)

    def _write_strategy_history(self, study: optuna.Study, trial: optuna.trial.FrozenTrial):
        """Append the latest strategy recommendation to the history file
        (no-op when the optimizer has no strategy selector)."""
        if not hasattr(self.optimizer, 'strategy_selector'):
            return

        strategy_file = self.tracking_dir / "strategy_history.json"

        # Read existing history
        if strategy_file.exists():
            with open(strategy_file, 'r') as f:
                history = json.load(f)
        else:
            history = []

        # Get latest recommendation from strategy selector
        if hasattr(self.optimizer.strategy_selector, 'recommendation_history'):
            selector_history = self.optimizer.strategy_selector.recommendation_history
            if selector_history:
                latest = selector_history[-1]
                # Only append if this is a new recommendation (not duplicate
                # for the same trial number)
                if not history or history[-1].get('trial_number') != trial.number:
                    history.append({
                        "trial_number": trial.number,
                        "timestamp": datetime.now().isoformat(),
                        "strategy": latest.get("strategy", "unknown"),
                        "confidence": latest.get("confidence", 0.0),
                        "reasoning": latest.get("reasoning", "")
                    })

        self._atomic_write(strategy_file, history)

    def _write_landscape_snapshot(self, study: optuna.Study, trial: optuna.trial.FrozenTrial):
        """Write latest landscape analysis snapshot (overwritten each trial).

        assumes optimizer.landscape_cache is a mapping when not None (it is
        spread into the snapshot with **) -- TODO confirm
        """
        if not hasattr(self.optimizer, 'landscape_cache'):
            return

        landscape = self.optimizer.landscape_cache
        if landscape is None:
            # Multi-objective - no landscape analysis
            snapshot = {
                "timestamp": datetime.now().isoformat(),
                "trial_number": trial.number,
                "ready": False,
                "message": "Landscape analysis not supported for multi-objective optimization"
            }
        else:
            snapshot = {
                "timestamp": datetime.now().isoformat(),
                "trial_number": trial.number,
                **landscape
            }

        self._atomic_write(self.tracking_dir / "landscape_snapshot.json", snapshot)

    def _write_confidence_history(self, study: optuna.Study, trial: optuna.trial.FrozenTrial):
        """Append confidence score to history (0.0 when no strategy selector
        or no recommendation exists yet)."""
        confidence_file = self.tracking_dir / "confidence_history.json"

        # Read existing history
        if confidence_file.exists():
            with open(confidence_file, 'r') as f:
                history = json.load(f)
        else:
            history = []

        # Get confidence from latest recommendation
        confidence = 0.0
        if hasattr(self.optimizer, 'strategy_selector') and hasattr(self.optimizer.strategy_selector, 'recommendation_history'):
            selector_history = self.optimizer.strategy_selector.recommendation_history
            if selector_history:
                confidence = selector_history[-1].get("confidence", 0.0)

        history.append({
            "trial_number": trial.number,
            "timestamp": datetime.now().isoformat(),
            "confidence": confidence
        })

        self._atomic_write(confidence_file, history)

    def _atomic_write(self, filepath: Path, data: Any):
        """
        Write JSON file atomically (temp file + rename).

        This prevents dashboard from reading partial/corrupted files.

        Raises:
            Exception: re-raises any write/serialization error after removing
            the temp file, leaving the previous file version intact.
        """
        temp_file = filepath.with_suffix('.tmp')
        try:
            with open(temp_file, 'w') as f:
                json.dump(data, f, indent=2)
            # Atomic rename (replaces existing target on POSIX and Windows)
            temp_file.replace(filepath)
        except Exception as e:
            # Clean up the partial temp file; the original target is untouched
            if temp_file.exists():
                temp_file.unlink()
            raise e
|
||||
|
||||
|
||||
def create_realtime_callback(tracking_dir: Path, optimizer_ref: Any, verbose: bool = True) -> RealtimeTrackingCallback:
    """
    Factory for the per-trial realtime tracking callback.

    Usage in IntelligentOptimizer:
        ```python
        callback = create_realtime_callback(self.tracking_dir, self, verbose=self.verbose)
        self.study.optimize(objective_function, n_trials=n, callbacks=[callback])
        ```
    """
    return RealtimeTrackingCallback(
        tracking_dir=tracking_dir,
        optimizer_ref=optimizer_ref,
        verbose=verbose,
    )
|
||||
Reference in New Issue
Block a user