# NOTE: This module was affected by a package reorganization (BREAKING CHANGE):
# code now lives under core/, processors/, surrogates/, nx/, study/, reporting/,
# config/, utils/, and future/. Backwards-compatibility aliases with deprecation
# warnings cover the old import paths. Migration examples:
#   OLD: from optimization_engine.nx_solver import NXSolver
#   NEW: from optimization_engine.nx.solver import NXSolver
#   OLD: from optimization_engine.runner import OptimizationRunner
#   NEW: from optimization_engine.core.runner import OptimizationRunner
"""
|
|
Auto-Documentation Generator for Atomizer
|
|
|
|
This module automatically generates documentation from code, ensuring
|
|
that skills and protocols stay in sync with the implementation.
|
|
|
|
Usage:
|
|
python -m optimization_engine.auto_doc extractors
|
|
python -m optimization_engine.auto_doc templates
|
|
python -m optimization_engine.auto_doc all
|
|
"""
|
|
|
|
import importlib
import inspect
import json
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
|
|
|
|
|
|
def get_extractor_info() -> List[Dict[str, Any]]:
    """Collect metadata for every extractor exported by the extractors package.

    Returns one dict per callable in ``extractors.__all__`` with keys:
    name, module, category, phase, parameters, docstring, is_class.
    """
    from optimization_engine import extractors

    # Keyword rules for categorising an extractor by its name.
    # Order matters: the first matching rule wins.
    category_rules = [
        ('stress', ('stress',)),
        ('thermal', ('temperature', 'thermal', 'heat')),
        ('modal', ('modal', 'frequency')),
        ('optical', ('zernike',)),
        ('mass', ('mass',)),
        ('strain', ('strain',)),
        ('forces', ('spc', 'reaction', 'force')),
    ]

    # Extractors added in later phases; everything else defaults to Phase 1.
    phase2_names = {
        'extract_principal_stress', 'extract_max_principal_stress',
        'extract_min_principal_stress', 'extract_strain_energy',
        'extract_total_strain_energy', 'extract_strain_energy_density',
        'extract_spc_forces', 'extract_total_reaction_force',
        'extract_reaction_component', 'check_force_equilibrium',
    }
    phase3_names = {
        'extract_temperature', 'extract_temperature_gradient',
        'extract_heat_flux', 'get_max_temperature',
        'extract_modal_mass', 'extract_frequencies',
        'get_first_frequency', 'get_modal_mass_ratio',
    }

    collected = []
    for name in extractors.__all__:
        obj = getattr(extractors, name)
        if not callable(obj):
            continue

        # Introspect the call signature; some callables refuse introspection.
        try:
            sig = inspect.signature(obj)
        except (ValueError, TypeError):
            params = []
        else:
            params = [
                {
                    'name': p.name,
                    'default': str(p.default) if p.default != inspect.Parameter.empty else None,
                    'annotation': str(p.annotation) if p.annotation != inspect.Parameter.empty else None,
                }
                for p in sig.parameters.values()
            ]

        lowered = name.lower()
        category = 'general'
        for cat, keywords in category_rules:
            if any(kw in lowered for kw in keywords):
                category = cat
                break

        if name in phase2_names:
            phase = 'Phase 2'
        elif name in phase3_names:
            phase = 'Phase 3'
        else:
            phase = 'Phase 1'

        collected.append({
            'name': name,
            'module': obj.__module__,
            'category': category,
            'phase': phase,
            'parameters': params,
            'docstring': inspect.getdoc(obj) or 'No documentation available',
            'is_class': inspect.isclass(obj),
        })

    return collected
|
|
|
|
|
|
def get_template_info() -> List[Dict[str, Any]]:
    """Load study-template metadata from ``templates/registry.json``.

    Returns:
        The list stored under the registry's ``templates`` key, or an
        empty list when the registry file is missing or has no such key.
    """
    templates_file = Path(__file__).parent / 'templates' / 'registry.json'

    if not templates_file.exists():
        return []

    # Be explicit about the encoding: the registry is UTF-8 JSON, and the
    # platform default (e.g. cp1252 on Windows) could corrupt non-ASCII
    # template descriptions.
    with open(templates_file, encoding='utf-8') as f:
        data = json.load(f)

    return data.get('templates', [])
|
|
|
|
|
|
def generate_extractor_markdown(extractors: List[Dict[str, Any]]) -> str:
    """Generate markdown documentation for extractors.

    Produces a quick-reference table followed by a detailed section per
    category, with extractors sorted alphabetically within each section.

    Args:
        extractors: Metadata dicts as produced by ``get_extractor_info()``.

    Returns:
        The complete markdown document as a single string.
    """
    lines = [
        "# Atomizer Extractor Library",
        "",
        f"*Auto-generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}*",
        "",
        "This document is automatically generated from the extractor source code.",
        "",
        "---",
        "",
        "## Quick Reference",
        "",
        "| Extractor | Category | Phase | Description |",
        "|-----------|----------|-------|-------------|",
    ]

    for ext in sorted(extractors, key=lambda x: (x['category'], x['name'])):
        # First docstring line, truncated so the table stays readable.
        doc_first_line = ext['docstring'].split('\n')[0][:60]
        lines.append(f"| `{ext['name']}` | {ext['category']} | {ext['phase']} | {doc_first_line} |")

    lines.extend(["", "---", ""])

    # Group by category for the detailed sections.
    categories: Dict[str, List[Dict[str, Any]]] = {}
    for ext in extractors:
        categories.setdefault(ext['category'], []).append(ext)

    for cat_name, cat_extractors in sorted(categories.items()):
        lines.append(f"## {cat_name.title()} Extractors")
        lines.append("")

        for ext in sorted(cat_extractors, key=lambda x: x['name']):
            lines.append(f"### `{ext['name']}`")
            lines.append("")
            lines.append(f"**Module**: `{ext['module']}`")
            lines.append(f"**Phase**: {ext['phase']}")
            lines.append("")

            # Parameters
            if ext['parameters']:
                lines.append("**Parameters**:")
                lines.append("")
                for param in ext['parameters']:
                    # BUGFIX: compare against None, not truthiness, so a
                    # falsy default (e.g. an empty string) is still shown.
                    default_str = f" = `{param['default']}`" if param['default'] is not None else ""
                    lines.append(f"- `{param['name']}`{default_str}")
                lines.append("")

            # Full docstring, fenced so markdown inside it is not rendered.
            lines.append("**Description**:")
            lines.append("")
            lines.append("```")
            lines.append(ext['docstring'])
            lines.append("```")
            lines.append("")
            lines.append("---")
            lines.append("")

    return '\n'.join(lines)
|
|
|
|
|
|
def generate_template_markdown(templates: List[Dict[str, Any]]) -> str:
|
|
"""Generate markdown documentation for templates."""
|
|
lines = [
|
|
"# Atomizer Study Templates",
|
|
"",
|
|
f"*Auto-generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}*",
|
|
"",
|
|
"Available templates for quick study creation.",
|
|
"",
|
|
"---",
|
|
"",
|
|
"## Template Reference",
|
|
"",
|
|
"| Template | Objectives | Extractors |",
|
|
"|----------|------------|------------|",
|
|
]
|
|
|
|
for tmpl in templates:
|
|
# Handle objectives that might be dicts or strings
|
|
obj_list = tmpl.get('objectives', [])
|
|
if obj_list and isinstance(obj_list[0], dict):
|
|
objectives = ', '.join([o.get('name', str(o)) for o in obj_list])
|
|
else:
|
|
objectives = ', '.join(obj_list)
|
|
extractors = ', '.join(tmpl.get('extractors', []))
|
|
lines.append(f"| `{tmpl['name']}` | {objectives} | {extractors} |")
|
|
|
|
lines.extend(["", "---", ""])
|
|
|
|
for tmpl in templates:
|
|
lines.append(f"## {tmpl['name']}")
|
|
lines.append("")
|
|
lines.append(f"**Description**: {tmpl.get('description', 'N/A')}")
|
|
lines.append("")
|
|
lines.append(f"**Category**: {tmpl.get('category', 'N/A')}")
|
|
lines.append(f"**Solver**: {tmpl.get('solver', 'N/A')}")
|
|
lines.append(f"**Sampler**: {tmpl.get('sampler', 'N/A')}")
|
|
lines.append(f"**Turbo Suitable**: {'Yes' if tmpl.get('turbo_suitable') else 'No'}")
|
|
lines.append("")
|
|
lines.append(f"**Example Study**: `{tmpl.get('example_study', 'N/A')}`")
|
|
lines.append("")
|
|
|
|
if tmpl.get('objectives'):
|
|
lines.append("**Objectives**:")
|
|
for obj in tmpl['objectives']:
|
|
if isinstance(obj, dict):
|
|
lines.append(f"- {obj.get('name', '?')} ({obj.get('direction', '?')}) - Extractor: {obj.get('extractor', '?')}")
|
|
else:
|
|
lines.append(f"- {obj}")
|
|
lines.append("")
|
|
|
|
if tmpl.get('extractors'):
|
|
lines.append("**Extractors Used**:")
|
|
for ext in tmpl['extractors']:
|
|
lines.append(f"- {ext}")
|
|
lines.append("")
|
|
|
|
if tmpl.get('recommended_trials'):
|
|
lines.append("**Recommended Trials**:")
|
|
for key, val in tmpl['recommended_trials'].items():
|
|
lines.append(f"- {key}: {val}")
|
|
lines.append("")
|
|
|
|
lines.append("---")
|
|
lines.append("")
|
|
|
|
return '\n'.join(lines)
|
|
|
|
|
|
def generate_cheatsheet_update(extractors: List[Dict[str, Any]]) -> str:
    """Generate the extractor quick reference for 01_CHEATSHEET.md."""
    # Human-readable physics label for each extractor category.
    physics_labels = {
        'stress': 'Von Mises stress',
        'thermal': 'Temperature',
        'modal': 'Natural frequency',
        'optical': 'Zernike WFE',
        'mass': 'Mass',
        'strain': 'Strain energy',
        'forces': 'Reaction forces',
        'general': 'Displacement',
    }

    rows = [
        "## Extractor Quick Reference",
        "",
        "| Physics | Extractor | Function Call |",
        "|---------|-----------|---------------|",
    ]

    # Stable sort keeps same-category extractors in their original order.
    for ext in sorted(extractors, key=lambda x: x['category']):
        if ext['is_class']:
            continue
        label = physics_labels.get(ext['category'], ext['category'])
        # Show at most the first two parameters in the example call.
        leading_params = (ext['parameters'] or [])[:2]
        call_args = ', '.join(p['name'] for p in leading_params)
        rows.append(f"| {label} | {ext['name']} | `{ext['name']}({call_args})` |")

    return '\n'.join(rows)
|
|
|
|
|
|
def update_atomizer_context(extractors: List[Dict[str, Any]], templates: List[Dict[str, Any]]):
    """Report current extractor/template counts for ATOMIZER_CONTEXT.md.

    Currently only prints the counts; actually rewriting version info in
    the context file is a planned follow-up (see TODO below).

    Args:
        extractors: Metadata from ``get_extractor_info()``.
        templates: Metadata from ``get_template_info()``.
    """
    context_file = Path(__file__).parent.parent / '.claude' / 'ATOMIZER_CONTEXT.md'

    if not context_file.exists():
        print(f"Warning: {context_file} not found")
        return

    # The original read the file's content here but never used it; the
    # dead read has been removed.
    print(f"Found {len(extractors)} extractors and {len(templates)} templates")

    # TODO: update version info inside context_file based on count changes.
|
|
|
|
|
|
def main():
    """CLI entry point: regenerate docs for extractors and/or templates."""
    import sys

    if len(sys.argv) < 2:
        print("Usage: python -m optimization_engine.auto_doc [extractors|templates|all]")
        sys.exit(1)

    command = sys.argv[1]

    # All generated markdown lands in <repo>/docs/generated.
    output_dir = Path(__file__).parent.parent / 'docs' / 'generated'
    output_dir.mkdir(parents=True, exist_ok=True)

    if command in ('extractors', 'all'):
        print("Generating extractor documentation...")
        extractors = get_extractor_info()

        # Full reference document.
        full_doc = output_dir / 'EXTRACTORS.md'
        full_doc.write_text(generate_extractor_markdown(extractors))
        print(f" Written: {full_doc}")

        # Compact table destined for the cheatsheet.
        cheatsheet_doc = output_dir / 'EXTRACTOR_CHEATSHEET.md'
        cheatsheet_doc.write_text(generate_cheatsheet_update(extractors))
        print(f" Written: {cheatsheet_doc}")

        print(f" Found {len(extractors)} extractors")

    if command in ('templates', 'all'):
        print("Generating template documentation...")
        templates = get_template_info()

        if not templates:
            print(" No templates found")
        else:
            template_doc = output_dir / 'TEMPLATES.md'
            template_doc.write_text(generate_template_markdown(templates))
            print(f" Written: {template_doc}")
            print(f" Found {len(templates)} templates")

    if command == 'all':
        print("\nUpdating ATOMIZER_CONTEXT.md...")
        update_atomizer_context(get_extractor_info(), get_template_info())

    print("\nDone!")
|
|
|
|
|
|
# Script entry point: run the CLI when executed directly.
if __name__ == '__main__':
    main()
|