Moved experimental LLM integration code to optimization_engine/future/: - llm_optimization_runner.py - Runtime LLM API runner - llm_workflow_analyzer.py - Workflow analysis - inline_code_generator.py - Auto-generate calculations - hook_generator.py - Auto-generate hooks - report_generator.py - LLM report generation - extractor_orchestrator.py - Extractor orchestration Added comprehensive optimization_engine/future/README.md explaining: - MVP LLM strategy (Claude Code skills, not runtime LLM) - Why files were archived - When to revisit post-MVP - Production architecture reference Production runner confirmed: optimization_engine/runner.py is sole active runner. This establishes clear separation between: - Production code (stable, no runtime LLM dependencies) - Experimental code (archived for post-MVP exploration) Part of Phase 1: Core Stabilization & Organization for MVP Generated with Claude Code Co-Authored-By: Claude <noreply@anthropic.com>
135 lines
5.0 KiB
Python
135 lines
5.0 KiB
Python
"""
|
|
Report Generator Utility
|
|
Generates Markdown/HTML/PDF reports for optimization studies
|
|
"""
|
|
|
|
import json
|
|
from pathlib import Path
|
|
from typing import Optional
|
|
import markdown
|
|
from datetime import datetime
|
|
|
|
def generate_study_report(
    study_dir: Path,
    output_format: str = "markdown",
    include_llm_summary: bool = False
) -> Optional[Path]:
    """
    Generate a report for the study.

    Args:
        study_dir: Path to the study directory
        output_format: 'markdown' (or 'md'), 'html', or 'pdf'
        include_llm_summary: Whether to include AI-generated summary

    Returns:
        Path to the generated report file, or None if the study data is
        missing, the format is unrecognized, or generation fails.
    """
    try:
        # Load study data produced by the optimization runner.
        config_path = study_dir / "1_setup" / "optimization_config.json"
        history_path = study_dir / "2_results" / "optimization_history_incremental.json"

        # Both inputs are required; bail out quietly if the study is incomplete.
        if not config_path.exists() or not history_path.exists():
            return None

        with open(config_path, encoding="utf-8") as f:
            config = json.load(f)

        with open(history_path, encoding="utf-8") as f:
            history = json.load(f)

        # Best trial = lowest objective value (minimization assumed).
        best_trial = min(history, key=lambda x: x['objective']) if history else None

        md_content = _build_markdown(config, history, best_trial, study_dir, include_llm_summary)

        # Save report based on format.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        output_dir = study_dir / "2_results"

        if output_format in ('markdown', 'md'):
            output_path = output_dir / f"optimization_report_{timestamp}.md"
            with open(output_path, 'w', encoding="utf-8") as f:
                f.write(md_content)
            return output_path

        if output_format == 'html':
            output_path = output_dir / f"optimization_report_{timestamp}.html"
            styled_html = _wrap_html(markdown.markdown(md_content))
            with open(output_path, 'w', encoding="utf-8") as f:
                f.write(styled_html)
            return output_path

        if output_format == 'pdf':
            # WeasyPrint is an optional dependency; degrade to HTML if absent.
            try:
                from weasyprint import HTML
            except ImportError:
                print("WeasyPrint not installed, falling back to HTML")
                return generate_study_report(study_dir, 'html', include_llm_summary)
            output_path = output_dir / f"optimization_report_{timestamp}.pdf"
            HTML(string=markdown.markdown(md_content)).write_pdf(str(output_path))
            return output_path

        # Previously an unknown format fell through to an UnboundLocalError
        # that the broad except silently swallowed; fail explicitly instead.
        print(f"Unknown report format: {output_format!r}")
        return None

    except Exception as e:
        # Best-effort utility: report generation must never crash the caller.
        print(f"Report generation error: {e}")
        return None


def _build_markdown(config, history, best_trial, study_dir, include_llm_summary):
    """Assemble the Markdown report body from study config and trial history."""
    md_content = f"""# Optimization Report: {config.get('study_name', study_dir.name)}

**Date**: {datetime.now().strftime('%Y-%m-%d %H:%M')}
**Status**: {'Completed' if len(history) >= config.get('optimization_settings', {}).get('n_trials', 50) else 'In Progress'}

## Executive Summary
{_generate_summary(history, best_trial, include_llm_summary)}

## Study Configuration
- **Objectives**: {', '.join([o['name'] for o in config.get('objectives', [])])}
- **Design Variables**: {len(config.get('design_variables', []))} variables
- **Total Trials**: {len(history)}

## Best Result (Trial #{best_trial['trial_number'] if best_trial else 'N/A'})
- **Objective Value**: {best_trial['objective'] if best_trial else 'N/A'}
- **Parameters**:
"""
    if best_trial:
        for k, v in best_trial['design_variables'].items():
            md_content += f"  - **{k}**: {v:.4f}\n"

    md_content += "\n## Optimization Progress\n"
    md_content += "The optimization process showed convergence towards the optimal solution.\n"
    return md_content


def _wrap_html(html_content):
    """Wrap rendered HTML fragment in a minimal standalone page with basic styling."""
    # NOTE: the original stylesheet used `rounded: 4px`, which is not a CSS
    # property; the intended effect is `border-radius`.
    return f"""
<html>
<head>
<style>
body {{ font-family: sans-serif; max-width: 800px; margin: 0 auto; padding: 20px; }}
h1 {{ color: #2563eb; }}
h2 {{ border-bottom: 1px solid #e5e7eb; padding-bottom: 10px; margin-top: 30px; }}
code {{ background: #f3f4f6; padding: 2px 4px; border-radius: 4px; }}
</style>
</head>
<body>
{html_content}
</body>
</html>
"""
|
def _generate_summary(history, best_trial, use_llm):
|
|
if use_llm:
|
|
return "[AI Summary Placeholder] The optimization successfully identified a design that minimizes mass while satisfying all constraints."
|
|
|
|
if not history:
|
|
return "No trials completed yet."
|
|
|
|
improvement = 0
|
|
if len(history) > 1:
|
|
first = history[0]['objective']
|
|
best = best_trial['objective']
|
|
improvement = ((first - best) / first) * 100
|
|
|
|
return f"The optimization run completed {len(history)} trials. The best design found (Trial #{best_trial['trial_number']}) achieved an objective value of {best_trial['objective']:.4f}, representing a {improvement:.1f}% improvement over the initial design."
|