153 lines
5.2 KiB
Python
153 lines
5.2 KiB
Python
|
|
"""
|
||
|
|
Generate human-readable optimization reports from incremental history JSON.
|
||
|
|
|
||
|
|
This script should be run automatically at the end of optimization, or manually
|
||
|
|
to generate a report for any completed optimization study.
|
||
|
|
"""
|
||
|
|
|
||
|
|
import json
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional

import numpy as np
|
||
|
|
|
||
|
|
|
||
|
|
def generate_optimization_report(history_file: Path, target_value: Optional[float] = None, tolerance: float = 0.1) -> str:
    """
    Generate a comprehensive human-readable optimization report.

    Each trial record in the history file is expected to carry the keys
    'trial_number', 'design_variables' (dict), 'results' (dict), and
    'objective' (float). Lower objective values are treated as better.

    Args:
        history_file: Path to optimization_history_incremental.json
        target_value: Target objective value (if applicable). When given, a
            SUCCESS ASSESSMENT section is appended to the report.
        tolerance: Acceptable absolute error for success (default 0.1)

    Returns:
        Report text as a string (newline-joined lines)
    """
    # Load history; explicit encoding avoids platform-dependent defaults.
    with open(history_file, encoding='utf-8') as f:
        history = json.load(f)

    if not history:
        return "No optimization history found."

    report = []
    report.append('=' * 80)
    report.append('OPTIMIZATION REPORT')
    report.append('=' * 80)
    report.append('')

    # Study information. The study directory is assumed to sit three levels
    # above the history file (study/<substudy>/results/history.json) —
    # TODO confirm against the project layout.
    study_dir = history_file.parent.parent.parent
    study_name = study_dir.name
    report.append('STUDY INFORMATION')
    report.append('-' * 80)
    report.append(f'Study: {study_name}')
    report.append(f'Total trials: {len(history)}')
    report.append('')

    # Design variables: per-variable range and mean across all trials.
    # Variable names are taken from the first trial; all trials are assumed
    # to share the same variable set.
    first_trial = history[0]
    design_vars = list(first_trial['design_variables'].keys())
    report.append('DESIGN VARIABLES')
    report.append('-' * 80)
    for var in design_vars:
        values = [t['design_variables'][var] for t in history]
        report.append(f'  {var}:')
        report.append(f'    Range: {min(values):.4f} - {max(values):.4f}')
        report.append(f'    Mean: {np.mean(values):.4f}')
    report.append('')

    # Objective results: summary statistics for each recorded result key.
    results = list(first_trial['results'].keys())
    report.append('OBJECTIVE RESULTS')
    report.append('-' * 80)
    for result in results:
        values = [t['results'][result] for t in history]
        report.append(f'  {result}:')
        report.append(f'    Range: {min(values):.4f} - {max(values):.4f}')
        report.append(f'    Mean: {np.mean(values):.4f}')
        report.append(f'    Std dev: {np.std(values):.4f}')
    report.append('')

    # Best trial: lowest objective (minimization). Cast argmin's numpy
    # integer to a plain int before indexing the list.
    objectives = [t['objective'] for t in history]
    best_trial = history[int(np.argmin(objectives))]

    report.append('BEST TRIAL')
    report.append('-' * 80)
    report.append(f'Trial #{best_trial["trial_number"]}')
    report.append(f'  Objective value: {best_trial["objective"]:.4f}')
    report.append('  Design variables:')
    for var, value in best_trial['design_variables'].items():
        report.append(f'    {var}: {value:.4f}')
    report.append('  Results:')
    for result, value in best_trial['results'].items():
        report.append(f'    {result}: {value:.4f}')
    report.append('')

    # Top 5 trials by ascending objective value.
    report.append('TOP 5 TRIALS (by objective value)')
    report.append('-' * 80)
    sorted_history = sorted(history, key=lambda x: x['objective'])
    for i, trial in enumerate(sorted_history[:5], 1):
        report.append(f'{i}. Trial #{trial["trial_number"]}: Objective = {trial["objective"]:.4f}')
        vars_str = ', '.join([f'{k}={v:.2f}' for k, v in trial['design_variables'].items()])
        report.append(f'   {vars_str}')
    report.append('')

    # Success assessment (only when a target was provided).
    if target_value is not None:
        report.append('SUCCESS ASSESSMENT')
        report.append('-' * 80)
        best_objective = min(objectives)
        error = abs(best_objective - target_value)

        if error <= tolerance:
            report.append(f'[SUCCESS] Target {target_value} achieved within tolerance {tolerance}!')
            report.append(f'  Best objective: {best_objective:.4f}')
            report.append(f'  Error: {error:.4f}')
        else:
            report.append(f'[INCOMPLETE] Target {target_value} not achieved')
            report.append(f'  Best objective: {best_objective:.4f}')
            report.append(f'  Error: {error:.4f}')
            report.append(f'  Need {error - tolerance:.4f} improvement')
        report.append('')

    report.append('=' * 80)

    return '\n'.join(report)
|
||
|
|
|
||
|
|
|
||
|
|
def main():
    """Command-line interface for report generation.

    Usage: python generate_report.py <history_file> [target_value] [tolerance]

    Exits with status 1 on missing arguments, a nonexistent history file, or
    non-numeric target/tolerance values.
    """
    if len(sys.argv) < 2:
        print("Usage: python generate_report.py <history_file> [target_value] [tolerance]")
        print("Example: python generate_report.py studies/my_study/2_substudies/results/optimization_history_incremental.json 115.0 0.1")
        sys.exit(1)

    history_file = Path(sys.argv[1])
    if not history_file.exists():
        print(f"Error: History file not found: {history_file}")
        sys.exit(1)

    # Parse optional numeric arguments; fail with a clean message instead of
    # a raw traceback on malformed input.
    try:
        target_value = float(sys.argv[2]) if len(sys.argv) > 2 else None
        tolerance = float(sys.argv[3]) if len(sys.argv) > 3 else 0.1
    except ValueError:
        print("Error: target_value and tolerance must be numbers")
        sys.exit(1)

    # Generate report
    report = generate_optimization_report(history_file, target_value, tolerance)

    # Save report next to the history file; explicit encoding avoids
    # platform-dependent defaults.
    report_file = history_file.parent / 'OPTIMIZATION_REPORT.txt'
    with open(report_file, 'w', encoding='utf-8') as f:
        f.write(report)

    # Print to console
    print(report)
    print()
    print(f"Report saved to: {report_file}")
||
|
|
|
||
|
|
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
|