feat: Add Protocol 13 adaptive optimization, Plotly charts, and dashboard improvements

## Protocol 13: Adaptive Multi-Objective Optimization
- Iterative FEA + Neural Network surrogate workflow
- Initial FEA sampling, NN training, NN-accelerated search
- FEA validation of top NN predictions, retraining loop
- adaptive_state.json tracks iteration history and best values
- M1 mirror study (V11) with 103 FEA, 3000 NN trials

## Dashboard Visualization Enhancements
- Added Plotly.js interactive charts (parallel coords, Pareto, convergence)
- Lazy loading with React.lazy() for performance
- Code splitting: plotly.js-basic-dist (~1MB vs 3.5MB)
- Chart library toggle (Recharts default, Plotly on-demand)
- ExpandableChart component for full-screen modal views
- ConsoleOutput component for real-time log viewing

## Documentation
- Protocol 13 detailed documentation
- Dashboard visualization guide
- Plotly components README
- Updated run-optimization skill with Mode 5 (adaptive)

## Bug Fixes
- Fixed TypeScript errors in dashboard components
- Fixed Card component to accept ReactNode title
- Removed unused imports across components

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit: 8cbdbcad78 (parent e74f1ccf36), authored by Antoine on 2025-12-04 07:41:54 -05:00.
270 changed files with 15471 additions and 517 deletions.

View File

@@ -287,18 +287,21 @@ async def get_optimization_history(study_id: str, limit: Optional[int] = None):
conn = sqlite3.connect(str(study_db))
cursor = conn.cursor()
# Get all completed trials
# Get all completed trials FROM ALL STUDIES in the database
# This handles adaptive optimizations that create multiple Optuna studies
# (e.g., v11_fea for FEA trials, v11_iter1_nn for NN trials, etc.)
cursor.execute("""
SELECT trial_id, number, datetime_start, datetime_complete
FROM trials
WHERE state = 'COMPLETE'
ORDER BY number DESC
SELECT t.trial_id, t.number, t.datetime_start, t.datetime_complete, s.study_name
FROM trials t
JOIN studies s ON t.study_id = s.study_id
WHERE t.state = 'COMPLETE'
ORDER BY t.datetime_start DESC
""" + (f" LIMIT {limit}" if limit else ""))
trial_rows = cursor.fetchall()
trials = []
for trial_id, trial_num, start_time, end_time in trial_rows:
for trial_id, trial_num, start_time, end_time, study_name in trial_rows:
# Get objectives for this trial
cursor.execute("""
SELECT value
@@ -363,15 +366,24 @@ async def get_optimization_history(study_id: str, limit: Optional[int] = None):
# Merge with params (prefer user_attrs design_vars if available)
final_design_vars = {**params, **design_vars_from_attrs} if design_vars_from_attrs else params
# Extract source for FEA vs NN differentiation
source = user_attrs.get("source", "FEA") # Default to FEA for legacy studies
# Use trial_id as unique identifier when multiple Optuna studies exist
# This avoids trial number collisions between studies
unique_trial_num = trial_id if study_name else trial_num
trials.append({
"trial_number": trial_num,
"trial_number": unique_trial_num,
"objective": values[0] if len(values) > 0 else None, # Primary objective
"objectives": values if len(values) > 1 else None, # All objectives for multi-objective
"design_variables": final_design_vars, # Use merged design vars
"results": results,
"user_attrs": user_attrs, # Include all user attributes
"source": source, # FEA or NN
"start_time": start_time,
"end_time": end_time
"end_time": end_time,
"study_name": study_name # Include for debugging
})
conn.close()
@@ -910,6 +922,72 @@ async def download_report(study_id: str, filename: str):
raise HTTPException(status_code=500, detail=f"Failed to download report: {str(e)}")
@router.get("/studies/{study_id}/console")
async def get_console_output(study_id: str, lines: int = 200):
    """
    Get the latest console output/logs from the optimization run.

    Args:
        study_id: Study identifier (directory name under STUDIES_DIR)
        lines: Maximum number of trailing log lines to return (default: 200).
            Non-positive values return no lines.

    Returns:
        JSON dict with the last ``lines`` log lines, the total line count,
        the log file path used, and a timestamp. If no log file exists yet,
        returns empty lines with an explanatory message instead of an error.

    Raises:
        HTTPException: 404 if the study directory does not exist,
            500 if the log file cannot be read.
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
        # Candidate log locations, checked in priority order; different run
        # layouts place the log in different subdirectories.
        log_paths = [
            study_dir / "optimization.log",
            study_dir / "2_results" / "optimization.log",
            study_dir / "3_results" / "optimization.log",
            study_dir / "run.log",
        ]
        # First existing candidate wins; None when no log has been written yet.
        log_path_used = next((p for p in log_paths if p.exists()), None)
        if log_path_used is None:
            return {
                "lines": [],
                "total_lines": 0,
                "log_file": None,
                "message": "No log file found. Optimization may not have started yet."
            }
        # errors='replace' guards against partially written or non-UTF-8 bytes
        # in a live log file.
        with open(log_path_used, 'r', encoding='utf-8', errors='replace') as f:
            all_lines = f.readlines()
        # Take the last N lines. Guard lines <= 0 explicitly: the naive
        # all_lines[-lines:] would return the ENTIRE file for lines == 0.
        last_lines = all_lines[-lines:] if lines > 0 else []
        # Strip only trailing newline characters, preserving other whitespace.
        last_lines = [line.rstrip('\n\r') for line in last_lines]
        return {
            "lines": last_lines,
            "total_lines": len(all_lines),
            "displayed_lines": len(last_lines),
            "log_file": str(log_path_used),
            "timestamp": datetime.now().isoformat()
        }
    except HTTPException:
        # Re-raise intentional HTTP errors (e.g. the 404 above) unchanged.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to read console output: {str(e)}")
@router.get("/studies/{study_id}/report")
async def get_study_report(study_id: str):
"""