feat: Add Analysis page, run comparison, notifications, and config editor
Dashboard enhancements:
- Add Analysis page with tabs: Overview, Parameters, Pareto, Correlations, Constraints, Surrogate, Runs
- Add PlotlyCorrelationHeatmap for parameter-objective correlation analysis
- Add PlotlyFeasibilityChart for constraint satisfaction visualization
- Add PlotlySurrogateQuality for FEA vs NN prediction comparison
- Add PlotlyRunComparison for comparing optimization runs within a study
Real-time improvements:
- Replace watchdog file-watching with SQLite database polling for better Windows reliability
- Add DatabasePoller class with 2-second polling interval
- Enhanced WebSocket messages: trial_completed, new_best, pareto_update, progress
Desktop notifications:
- Add useNotifications hook using Web Notifications API
- Add NotificationSettings toggle component
- Notify users when new best solutions are found
Config editor:
- Add PUT /studies/{study_id}/config endpoint with auto-backup
- Add ConfigEditor modal with tabs: General, Variables, Objectives, Settings, JSON
- Prevents editing while optimization is running
Enhanced Pareto visualization:
- Add dark mode styling with transparent backgrounds
- Add stats bar showing Pareto, FEA, NN, and infeasible counts
- Add Pareto front connecting line for 2D view
- Add table showing top 10 Pareto-optimal solutions
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -1794,3 +1794,563 @@ run_server("{storage_url}", host="0.0.0.0", port={port})
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Failed to launch Optuna dashboard: {str(e)}")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Model Files Endpoint
|
||||
# ============================================================================
|
||||
|
||||
@router.get("/studies/{study_id}/model-files")
async def get_model_files(study_id: str):
    """
    Get list of NX model files (.prt, .sim, .fem, .bdf, .dat, .op2) for a study.

    Args:
        study_id: Study identifier

    Returns:
        JSON with the model directory, the list of model files (name, path,
        extension, size, modified time) and the file count.

    Raises:
        HTTPException: 404 if the study does not exist, 500 on unexpected errors.
    """
    try:
        study_dir = STUDIES_DIR / study_id

        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Look for model directory (check multiple locations, most specific first)
        model_dirs = [
            study_dir / "1_setup" / "model",
            study_dir / "model",
            study_dir / "1_setup",
            study_dir
        ]

        model_files = []
        model_dir_path = None

        # NX and FEA file extensions to look for
        nx_extensions = {'.prt', '.sim', '.fem', '.bdf', '.dat', '.op2', '.f06', '.inp'}

        for model_dir in model_dirs:
            if model_dir.exists() and model_dir.is_dir():
                for file_path in model_dir.iterdir():
                    if file_path.is_file() and file_path.suffix.lower() in nx_extensions:
                        # Stat once per file instead of three separate syscalls
                        # (also avoids races where the file changes between calls)
                        stat = file_path.stat()
                        model_files.append({
                            "name": file_path.name,
                            "path": str(file_path),
                            "extension": file_path.suffix.lower(),
                            "size_bytes": stat.st_size,
                            "size_display": _format_file_size(stat.st_size),
                            "modified": datetime.fromtimestamp(stat.st_mtime).isoformat()
                        })
                # Report the first existing candidate directory as the model dir
                if model_dir_path is None:
                    model_dir_path = str(model_dir)

        # Sort by extension for better display (prt first, then sim, fem, etc.)
        extension_order = {'.prt': 0, '.sim': 1, '.fem': 2, '.bdf': 3, '.dat': 4, '.op2': 5, '.f06': 6, '.inp': 7}
        model_files.sort(key=lambda x: (extension_order.get(x['extension'], 99), x['name']))

        return {
            "study_id": study_id,
            "model_dir": model_dir_path or str(study_dir / "1_setup" / "model"),
            "files": model_files,
            "count": len(model_files)
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get model files: {str(e)}")
|
||||
|
||||
|
||||
def _format_file_size(size_bytes: int) -> str:
|
||||
"""Format file size in human-readable form"""
|
||||
if size_bytes < 1024:
|
||||
return f"{size_bytes} B"
|
||||
elif size_bytes < 1024 * 1024:
|
||||
return f"{size_bytes / 1024:.1f} KB"
|
||||
elif size_bytes < 1024 * 1024 * 1024:
|
||||
return f"{size_bytes / (1024 * 1024):.1f} MB"
|
||||
else:
|
||||
return f"{size_bytes / (1024 * 1024 * 1024):.2f} GB"
|
||||
|
||||
|
||||
@router.post("/studies/{study_id}/open-folder")
async def open_model_folder(study_id: str, folder_type: str = "model"):
    """
    Open the model folder in system file explorer.

    Args:
        study_id: Study identifier
        folder_type: Type of folder to open (model, results, setup)

    Returns:
        JSON with success status, a message, and the path that was opened.
    """
    import os
    import platform

    try:
        study_dir = STUDIES_DIR / study_id

        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Resolve the requested folder type to a concrete directory
        if folder_type == "model":
            target_dir = study_dir / "1_setup" / "model"
            if not target_dir.exists():
                target_dir = study_dir / "1_setup"
        elif folder_type == "results":
            target_dir = get_results_dir(study_dir)
        elif folder_type == "setup":
            target_dir = study_dir / "1_setup"
        else:
            target_dir = study_dir

        # Fall back to the study root if the resolved folder is missing
        if not target_dir.exists():
            target_dir = study_dir

        # Launch the platform-native file browser
        os_name = platform.system()
        try:
            if os_name == "Windows":
                os.startfile(str(target_dir))
            elif os_name == "Darwin":  # macOS
                subprocess.Popen(["open", str(target_dir)])
            else:  # Linux
                subprocess.Popen(["xdg-open", str(target_dir)])
        except Exception as e:
            # Report the failure in the payload rather than as an HTTP error
            return {
                "success": False,
                "message": f"Failed to open folder: {str(e)}",
                "path": str(target_dir)
            }

        return {
            "success": True,
            "message": f"Opened {target_dir}",
            "path": str(target_dir)
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to open folder: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/studies/{study_id}/best-solution")
async def get_best_solution(study_id: str):
    """
    Get the best trial(s) for a study with improvement metrics.

    Compares the best completed trial (lowest first-objective value) against
    the first completed trial to compute relative/absolute improvement.

    Args:
        study_id: Study identifier

    Returns:
        JSON with best_trial, first_trial, improvements, and total_trials.

    Raises:
        HTTPException: 404 if the study does not exist, 500 on unexpected errors.
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")

        results_dir = get_results_dir(study_dir)
        db_path = results_dir / "study.db"

        if not db_path.exists():
            # No optimization data yet - return an empty result set
            return {
                "study_id": study_id,
                "best_trial": None,
                "first_trial": None,
                "improvements": {},
                "total_trials": 0
            }

        conn = sqlite3.connect(str(db_path))
        try:
            conn.row_factory = sqlite3.Row
            cursor = conn.cursor()

            # Best trial = lowest first-objective value (minimize by default).
            # Filter on tv.objective = 0 so multi-objective databases compare
            # the same objective (matches get_study_runs), and report the
            # trial's completion time as the timestamp — the previous
            # datetime(tv.value_id, 'unixepoch') interpreted a rowid as an
            # epoch time, which produced meaningless dates.
            cursor.execute("""
                SELECT t.trial_id, t.number, tv.value as objective,
                       t.datetime_complete as timestamp
                FROM trials t
                JOIN trial_values tv ON t.trial_id = tv.trial_id
                WHERE t.state = 'COMPLETE' AND tv.objective = 0
                ORDER BY tv.value ASC
                LIMIT 1
            """)
            best_row = cursor.fetchone()

            # First completed trial, for the improvement comparison
            cursor.execute("""
                SELECT t.trial_id, t.number, tv.value as objective
                FROM trials t
                JOIN trial_values tv ON t.trial_id = tv.trial_id
                WHERE t.state = 'COMPLETE' AND tv.objective = 0
                ORDER BY t.number ASC
                LIMIT 1
            """)
            first_row = cursor.fetchone()

            # Total completed trial count
            cursor.execute("SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'")
            total_trials = cursor.fetchone()[0]

            best_trial = None
            first_trial = None
            improvements = {}

            if best_row:
                best_trial_id = best_row['trial_id']

                # Design variables of the best trial
                cursor.execute("""
                    SELECT param_name, param_value
                    FROM trial_params
                    WHERE trial_id = ?
                """, (best_trial_id,))
                params = {row['param_name']: row['param_value'] for row in cursor.fetchall()}

                # User attributes (including results); values are stored as JSON text
                cursor.execute("""
                    SELECT key, value_json
                    FROM trial_user_attributes
                    WHERE trial_id = ?
                """, (best_trial_id,))
                user_attrs = {}
                for row in cursor.fetchall():
                    try:
                        user_attrs[row['key']] = json.loads(row['value_json'])
                    except (TypeError, ValueError):
                        # Keep the raw value if it is not valid JSON
                        user_attrs[row['key']] = row['value_json']

                best_trial = {
                    "trial_number": best_row['number'],
                    "objective": best_row['objective'],
                    "design_variables": params,
                    "user_attrs": user_attrs,
                    "timestamp": best_row['timestamp']
                }

            if first_row:
                first_trial_id = first_row['trial_id']

                cursor.execute("""
                    SELECT param_name, param_value
                    FROM trial_params
                    WHERE trial_id = ?
                """, (first_trial_id,))
                first_params = {row['param_name']: row['param_value'] for row in cursor.fetchall()}

                first_trial = {
                    "trial_number": first_row['number'],
                    "objective": first_row['objective'],
                    "design_variables": first_params
                }

                # Relative improvement from the first to the best trial
                # (guard against division by zero on the initial objective)
                if best_row and first_row['objective'] != 0:
                    improvement_pct = ((first_row['objective'] - best_row['objective']) / abs(first_row['objective'])) * 100
                    improvements["objective"] = {
                        "initial": first_row['objective'],
                        "final": best_row['objective'],
                        "improvement_pct": round(improvement_pct, 2),
                        "absolute_change": round(first_row['objective'] - best_row['objective'], 6)
                    }
        finally:
            # Always release the SQLite connection, even if a query fails
            conn.close()

        return {
            "study_id": study_id,
            "best_trial": best_trial,
            "first_trial": first_trial,
            "improvements": improvements,
            "total_trials": total_trials
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get best solution: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/studies/{study_id}/runs")
async def get_study_runs(study_id: str):
    """
    Get all optimization runs/studies in the database for comparison.

    Many studies have multiple Optuna studies (e.g., v11_fea, v11_iter1_nn, v11_iter2_nn).
    This endpoint returns metrics for each sub-study.

    Args:
        study_id: Study identifier

    Returns:
        JSON with per-run metrics (trial count, best/avg first-objective
        value, time range, FEA/NN source) plus the total number of runs.

    Raises:
        HTTPException: 404 if the study does not exist, 500 on unexpected errors.
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")

        results_dir = get_results_dir(study_dir)
        db_path = results_dir / "study.db"

        if not db_path.exists():
            return {"runs": [], "total_runs": 0}

        conn = sqlite3.connect(str(db_path))
        try:
            conn.row_factory = sqlite3.Row
            cursor = conn.cursor()

            # All Optuna studies stored in this database
            cursor.execute("""
                SELECT study_id, study_name
                FROM studies
                ORDER BY study_id
            """)
            studies = cursor.fetchall()

            runs = []
            for study_row in studies:
                optuna_study_id = study_row['study_id']
                study_name = study_row['study_name']

                # Completed trial count for this sub-study
                cursor.execute("""
                    SELECT COUNT(*) FROM trials
                    WHERE study_id = ? AND state = 'COMPLETE'
                """, (optuna_study_id,))
                trial_count = cursor.fetchone()[0]

                # Skip runs with no completed trials
                if trial_count == 0:
                    continue

                # Best and average of the first objective — one query instead
                # of two identical joins
                cursor.execute("""
                    SELECT MIN(tv.value) as best_value, AVG(tv.value) as avg_value
                    FROM trial_values tv
                    JOIN trials t ON tv.trial_id = t.trial_id
                    WHERE t.study_id = ? AND t.state = 'COMPLETE' AND tv.objective = 0
                """, (optuna_study_id,))
                stats_result = cursor.fetchone()
                best_value = stats_result['best_value'] if stats_result else None
                avg_value = stats_result['avg_value'] if stats_result else None

                # Time range spanned by the completed trials
                cursor.execute("""
                    SELECT MIN(datetime_start) as first_trial, MAX(datetime_complete) as last_trial
                    FROM trials
                    WHERE study_id = ? AND state = 'COMPLETE'
                """, (optuna_study_id,))
                time_result = cursor.fetchone()

                # Classify the run source from the study name (NN surrogate vs FEA)
                source = "NN" if "_nn" in study_name.lower() else "FEA"

                runs.append({
                    "run_id": optuna_study_id,
                    "name": study_name,
                    "source": source,
                    "trial_count": trial_count,
                    "best_value": best_value,
                    "avg_value": avg_value,
                    "first_trial": time_result['first_trial'] if time_result else None,
                    "last_trial": time_result['last_trial'] if time_result else None
                })
        finally:
            # Always release the SQLite connection, even if a query fails
            conn.close()

        return {
            "runs": runs,
            "total_runs": len(runs),
            "study_id": study_id
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get runs: {str(e)}")
|
||||
|
||||
|
||||
class UpdateConfigRequest(BaseModel):
    """Request body for PUT /studies/{study_id}/config."""

    # Full replacement config; written verbatim to optimization_config.json
    config: dict
|
||||
|
||||
|
||||
@router.put("/studies/{study_id}/config")
async def update_study_config(study_id: str, request: UpdateConfigRequest):
    """
    Update the optimization_config.json for a study.

    The existing config is backed up to optimization_config.json.backup, and
    the new config is written atomically (temp file + rename) so a failed
    write cannot leave a truncated config file behind.

    Args:
        study_id: Study identifier
        request: New configuration data

    Returns:
        JSON with success status, config path, and backup path.

    Raises:
        HTTPException: 404 if the study or config file is missing, 409 if an
            optimization is currently running, 500 on unexpected errors.
    """
    try:
        study_dir = STUDIES_DIR / study_id

        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Check if optimization is running - don't allow config changes while running
        if is_optimization_running(study_id):
            raise HTTPException(
                status_code=409,
                detail="Cannot modify config while optimization is running. Stop the optimization first."
            )

        # Find config file location (preferred setup dir first)
        config_file = study_dir / "1_setup" / "optimization_config.json"
        if not config_file.exists():
            config_file = study_dir / "optimization_config.json"

        if not config_file.exists():
            raise HTTPException(status_code=404, detail=f"Config file not found for study {study_id}")

        # Backup existing config before touching it
        backup_file = config_file.with_suffix('.json.backup')
        shutil.copy(config_file, backup_file)

        # Write the new config atomically: dump to a temp file in the same
        # directory, then rename over the original so the config is never
        # left half-written if the dump fails
        tmp_file = config_file.with_suffix('.json.tmp')
        with open(tmp_file, 'w') as f:
            json.dump(request.config, f, indent=2)
        tmp_file.replace(config_file)

        return {
            "success": True,
            "message": "Configuration updated successfully",
            "path": str(config_file),
            "backup_path": str(backup_file)
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to update config: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/studies/{study_id}/export/{format}")
async def export_study_data(study_id: str, format: str):
    """
    Export study data in various formats: csv, json, config.

    Args:
        study_id: Study identifier
        format: Export format ("json", "csv", or "config"), case-insensitive

    Returns:
        JSONResponse with the exported data (CSV/config content is embedded
        in the JSON payload for the frontend to save as a file).

    Raises:
        HTTPException: 404 for missing study/data/config, 400 for an
            unsupported format, 500 on unexpected errors.
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")

        results_dir = get_results_dir(study_dir)
        db_path = results_dir / "study.db"

        if not db_path.exists():
            raise HTTPException(status_code=404, detail="No study data available")

        conn = sqlite3.connect(str(db_path))
        try:
            conn.row_factory = sqlite3.Row
            cursor = conn.cursor()

            # All completed trials with their objective values
            cursor.execute("""
                SELECT t.trial_id, t.number, tv.value as objective
                FROM trials t
                JOIN trial_values tv ON t.trial_id = tv.trial_id
                WHERE t.state = 'COMPLETE'
                ORDER BY t.number
            """)
            trials_data = []

            for row in cursor.fetchall():
                trial_id = row['trial_id']

                # Design parameters for this trial
                cursor.execute("""
                    SELECT param_name, param_value
                    FROM trial_params
                    WHERE trial_id = ?
                """, (trial_id,))
                params = {r['param_name']: r['param_value'] for r in cursor.fetchall()}

                # User attributes; values are stored as JSON text
                cursor.execute("""
                    SELECT key, value_json
                    FROM trial_user_attributes
                    WHERE trial_id = ?
                """, (trial_id,))
                user_attrs = {}
                for r in cursor.fetchall():
                    try:
                        user_attrs[r['key']] = json.loads(r['value_json'])
                    except (TypeError, ValueError):
                        # Keep the raw value if it is not valid JSON
                        user_attrs[r['key']] = r['value_json']

                trials_data.append({
                    "trial_number": row['number'],
                    "objective": row['objective'],
                    "params": params,
                    "user_attrs": user_attrs
                })
        finally:
            # Always release the SQLite connection, even if a query fails
            conn.close()

        # Normalize the requested format once
        fmt = format.lower()

        if fmt == "json":
            return JSONResponse(content={
                "study_id": study_id,
                "total_trials": len(trials_data),
                "trials": trials_data
            })

        elif fmt == "csv":
            import io
            import csv

            if not trials_data:
                return JSONResponse(content={"error": "No data to export"})

            # Build CSV in memory
            output = io.StringIO()

            # Union of all param names across trials (stable column order)
            param_names = sorted(set(
                key for trial in trials_data
                for key in trial['params'].keys()
            ))

            fieldnames = ['trial_number', 'objective'] + param_names
            writer = csv.DictWriter(output, fieldnames=fieldnames)
            writer.writeheader()

            for trial in trials_data:
                row_data = {
                    'trial_number': trial['trial_number'],
                    'objective': trial['objective']
                }
                row_data.update(trial['params'])
                writer.writerow(row_data)

            csv_content = output.getvalue()

            return JSONResponse(content={
                "filename": f"{study_id}_data.csv",
                "content": csv_content,
                "content_type": "text/csv"
            })

        elif fmt == "config":
            # Export the optimization config file itself
            setup_dir = study_dir / "1_setup"
            config_path = setup_dir / "optimization_config.json"

            if config_path.exists():
                with open(config_path, 'r') as f:
                    config = json.load(f)
                return JSONResponse(content={
                    "filename": f"{study_id}_config.json",
                    "content": json.dumps(config, indent=2),
                    "content_type": "application/json"
                })
            else:
                raise HTTPException(status_code=404, detail="Config file not found")

        else:
            raise HTTPException(status_code=400, detail=f"Unsupported format: {format}")

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to export data: {str(e)}")
|
||||
|
||||
Reference in New Issue
Block a user