feat: Enhance dashboard with charts, study report viewer, and pruning tracking

- Add ConvergencePlot component with running best, statistics, gradient fill
- Add ParameterImportanceChart with Pearson correlation analysis
- Add StudyReportViewer with KaTeX math rendering and full markdown support
- Update pruning endpoint to query Optuna database directly
- Add /report endpoint for STUDY_REPORT.md files
- Fix chart data transformation for single/multi-objective studies
- Update Protocol 13 documentation with new components
- Update generate-report skill with dashboard integration

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Antoine
2025-12-02 22:01:49 -05:00
parent ec5e42d733
commit 75d7036193
10 changed files with 2917 additions and 66 deletions

View File

@@ -21,6 +21,15 @@ router = APIRouter()
# Base studies directory
STUDIES_DIR = Path(__file__).parent.parent.parent.parent.parent / "studies"
def get_results_dir(study_dir: Path) -> Path:
    """Return a study's results directory, preferring ``2_results`` over ``3_results``.

    If ``2_results`` does not exist, the ``3_results`` path is returned
    (whether or not it exists), matching the layout of newer study protocols.
    """
    primary = study_dir / "2_results"
    return primary if primary.exists() else study_dir / "3_results"
@router.get("/studies")
async def list_studies():
"""List all available optimization studies"""
@@ -44,8 +53,10 @@ async def list_studies():
with open(config_file) as f:
config = json.load(f)
# Check if results directory exists
# Check if results directory exists (support both 2_results and 3_results)
results_dir = study_dir / "2_results"
if not results_dir.exists():
results_dir = study_dir / "3_results"
# Check for Optuna database (Protocol 10) or JSON history (other protocols)
study_db = results_dir / "study.db"
@@ -149,8 +160,8 @@ async def get_study_status(study_id: str):
with open(config_file) as f:
config = json.load(f)
# Check for results
results_dir = study_dir / "2_results"
# Check for results (support both 2_results and 3_results)
results_dir = get_results_dir(study_dir)
study_db = results_dir / "study.db"
history_file = results_dir / "optimization_history_incremental.json"
@@ -267,7 +278,7 @@ async def get_optimization_history(study_id: str, limit: Optional[int] = None):
"""Get optimization history (all trials)"""
try:
study_dir = STUDIES_DIR / study_id
results_dir = study_dir / "2_results"
results_dir = get_results_dir(study_dir)
study_db = results_dir / "study.db"
history_file = results_dir / "optimization_history_incremental.json"
@@ -323,16 +334,24 @@ async def get_optimization_history(study_id: str, limit: Optional[int] = None):
except (ValueError, TypeError):
user_attrs[key] = value_json
# Extract relevant metrics for results (mass, frequency, stress, displacement, etc.)
# Extract ALL numeric metrics from user_attrs for results
# This ensures multi-objective studies show all Zernike metrics, RMS values, etc.
results = {}
if "mass" in user_attrs:
results["mass"] = user_attrs["mass"]
if "frequency" in user_attrs:
results["frequency"] = user_attrs["frequency"]
if "max_stress" in user_attrs:
results["max_stress"] = user_attrs["max_stress"]
if "max_displacement" in user_attrs:
results["max_displacement"] = user_attrs["max_displacement"]
excluded_keys = {"design_vars", "constraint_satisfied", "constraint_violations"}
for key, val in user_attrs.items():
if key in excluded_keys:
continue
# Include numeric values and lists of numbers
if isinstance(val, (int, float)):
results[key] = val
elif isinstance(val, list) and len(val) > 0 and isinstance(val[0], (int, float)):
# For lists, store as-is (e.g., Zernike coefficients)
results[key] = val
elif key == "objectives" and isinstance(val, dict):
# Extract nested objectives dict (Zernike multi-objective studies)
for obj_key, obj_val in val.items():
if isinstance(obj_val, (int, float)):
results[obj_key] = obj_val
# Fallback to first frequency from objectives if available
if not results and len(values) > 0:
results["first_frequency"] = values[0]
@@ -378,18 +397,69 @@ async def get_optimization_history(study_id: str, limit: Optional[int] = None):
@router.get("/studies/{study_id}/pruning")
async def get_pruning_history(study_id: str):
    """Get pruning diagnostics from Optuna database or legacy JSON file

    Args:
        study_id: Study identifier (directory name under STUDIES_DIR).

    Returns:
        JSON with ``pruned_trials`` (list of per-trial dicts: trial number,
        sampled params, pruning cause, start/end timestamps) and ``count``.

    Raises:
        HTTPException: 404 if the study cannot be found.
    """
    try:
        study_dir = STUDIES_DIR / study_id
        results_dir = get_results_dir(study_dir)
        study_db = results_dir / "study.db"
        pruning_file = results_dir / "pruning_history.json"

        # Protocol 10+: read pruned trials straight from the Optuna SQLite DB.
        if study_db.exists():
            conn = sqlite3.connect(str(study_db))
            try:
                cursor = conn.cursor()

                # All pruned trials, most recent first.
                cursor.execute("""
                    SELECT t.trial_id, t.number, t.datetime_start, t.datetime_complete
                    FROM trials t
                    WHERE t.state = 'PRUNED'
                    ORDER BY t.number DESC
                """)
                pruned_rows = cursor.fetchall()

                pruned_trials = []
                for trial_id, trial_num, start_time, end_time in pruned_rows:
                    # Parameters sampled for this trial.
                    cursor.execute("""
                        SELECT param_name, param_value
                        FROM trial_params
                        WHERE trial_id = ?
                    """, (trial_id,))
                    params = {row[0]: row[1] for row in cursor.fetchall()}

                    # User attributes may record why the trial was pruned.
                    cursor.execute("""
                        SELECT key, value_json
                        FROM trial_user_attributes
                        WHERE trial_id = ?
                    """, (trial_id,))
                    user_attrs = {}
                    for key, value_json in cursor.fetchall():
                        try:
                            user_attrs[key] = json.loads(value_json)
                        except (ValueError, TypeError):
                            # Keep the raw string when it is not valid JSON.
                            user_attrs[key] = value_json

                    pruned_trials.append({
                        "trial_number": trial_num,
                        "params": params,
                        "pruning_cause": user_attrs.get("pruning_cause", "Unknown"),
                        "start_time": start_time,
                        "end_time": end_time
                    })
            finally:
                # Always release the DB handle, even if a query raises;
                # previously the connection leaked on any exception.
                conn.close()

            return {"pruned_trials": pruned_trials, "count": len(pruned_trials)}

        # Legacy protocols: fall back to the JSON pruning history file.
        if not pruning_file.exists():
            return {"pruned_trials": [], "count": 0}

        with open(pruning_file) as f:
            pruning_history = json.load(f)

        return {"pruned_trials": pruning_history, "count": len(pruning_history)}
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -468,7 +538,7 @@ async def get_optimizer_state(study_id: str):
"""Read realtime optimizer state from intelligent_optimizer/ (Protocol 13)"""
try:
study_dir = STUDIES_DIR / study_id
results_dir = study_dir / "2_results"
results_dir = get_results_dir(study_dir)
state_file = results_dir / "intelligent_optimizer" / "optimizer_state.json"
if not state_file.exists():
@@ -489,7 +559,7 @@ async def get_pareto_front(study_id: str):
"""Get Pareto-optimal solutions for multi-objective studies (Protocol 13)"""
try:
study_dir = STUDIES_DIR / study_id
results_dir = study_dir / "2_results"
results_dir = get_results_dir(study_dir)
study_db = results_dir / "study.db"
if not study_db.exists():
@@ -700,7 +770,7 @@ async def get_optuna_dashboard_url(study_id: str):
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
results_dir = study_dir / "2_results"
results_dir = get_results_dir(study_dir)
study_db = results_dir / "study.db"
if not study_db.exists():
@@ -809,7 +879,7 @@ async def download_report(study_id: str, filename: str):
raise HTTPException(status_code=400, detail="Invalid filename")
study_dir = STUDIES_DIR / study_id
results_dir = study_dir / "2_results"
results_dir = get_results_dir(study_dir)
file_path = results_dir / filename
@@ -838,3 +908,41 @@ async def download_report(study_id: str, filename: str):
raise HTTPException(status_code=404, detail=f"Report file not found")
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to download report: {str(e)}")
@router.get("/studies/{study_id}/report")
async def get_study_report(study_id: str):
    """Return the raw markdown of a study's STUDY_REPORT.md file.

    Args:
        study_id: Study identifier.

    Returns:
        JSON containing the markdown ``content``, the file ``path``,
        and the ``study_id``.
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # The report lives in the study root, not the results directory.
        report_path = study_dir / "STUDY_REPORT.md"
        if not report_path.exists():
            raise HTTPException(status_code=404, detail="No STUDY_REPORT.md found for this study")

        content = report_path.read_text(encoding='utf-8')

        return {
            "content": content,
            "path": str(report_path),
            "study_id": study_id
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to read study report: {str(e)}")