feat: Add Study Insights module (SYS_16) for physics visualizations

Introduces a new plugin architecture for study-specific physics
visualizations, separating "optimizer perspective" (Analysis) from
"engineer perspective" (Insights).

New module: optimization_engine/insights/
- base.py: StudyInsight base class, InsightConfig, InsightResult, registry
- zernike_wfe.py: Mirror WFE with 3D surface and Zernike decomposition
- stress_field.py: Von Mises stress contours with safety factors
- modal_analysis.py: Natural frequencies and mode shapes
- thermal_field.py: Temperature distribution visualization
- design_space.py: Parameter-objective landscape exploration

Features:
- 5 insight types: zernike_wfe, stress_field, modal, thermal, design_space
- CLI: python -m optimization_engine.insights generate <study>
- Standalone HTML generation with Plotly
- Enhanced Zernike viz: Turbo colorscale, smooth shading, 0.5x AMP
- Dashboard API fix: Added include_coefficients param to extract_relative()

Documentation:
- docs/protocols/system/SYS_16_STUDY_INSIGHTS.md
- Updated ATOMIZER_CONTEXT.md (v1.7)
- Updated 01_CHEATSHEET.md with insights section

Tools:
- tools/zernike_html_generator.py: Standalone WFE HTML generator
- tools/analyze_wfe.bat: Double-click to analyze OP2 files

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-20 13:46:28 -05:00
parent 01a7d7d121
commit 1612991d0d
15 changed files with 4450 additions and 173 deletions

View File

@@ -34,12 +34,52 @@ def get_results_dir(study_dir: Path) -> Path:
return results_dir
def resolve_study_path(study_id: str) -> Path:
    """Find a study folder by scanning all topic directories.

    Supports the nested folder structure studies/Topic/study_name/ as well as
    the legacy flat structure studies/study_name/. The study ID is the short
    directory name (e.g. 'm1_mirror_adaptive_V14').

    Args:
        study_id: Short study name (directory name, not a path).

    Returns:
        Full path to the study directory.

    Raises:
        HTTPException: 404 if no matching study directory is found.
    """
    def _is_study(path: Path) -> bool:
        # A directory counts as a study when it has a 1_setup folder or a
        # top-level optimization config file.
        return (path / "1_setup").exists() or (path / "optimization_config.json").exists()

    # First check the direct path (backwards compatibility for flat structure).
    # Path.is_dir() already implies existence, so a separate exists() is redundant.
    direct_path = STUDIES_DIR / study_id
    if direct_path.is_dir() and _is_study(direct_path):
        return direct_path

    # Scan topic folders for the nested structure: studies/Topic/study_name/
    for topic_dir in STUDIES_DIR.iterdir():
        if topic_dir.is_dir() and not topic_dir.name.startswith('.'):
            study_dir = topic_dir / study_id
            if study_dir.is_dir() and _is_study(study_dir):
                return study_dir

    raise HTTPException(status_code=404, detail=f"Study not found: {study_id}")
def get_study_topic(study_dir: Path) -> Optional[str]:
    """Return the topic folder name for a study, or None if the study sits in the root."""
    # A study is "nested" when its parent is a topic folder, i.e. the parent is
    # itself a direct child of the root studies directory (and not the root).
    parent = study_dir.parent
    is_nested = parent != STUDIES_DIR and parent.parent == STUDIES_DIR
    return parent.name if is_nested else None
def is_optimization_running(study_id: str) -> bool:
"""Check if an optimization process is currently running for a study.
Looks for Python processes running run_optimization.py with the study_id in the command line.
"""
study_dir = STUDIES_DIR / study_id
try:
study_dir = resolve_study_path(study_id)
except HTTPException:
return False
for proc in psutil.process_iter(['pid', 'name', 'cmdline', 'cwd']):
try:
@@ -91,130 +131,168 @@ def get_accurate_study_status(study_id: str, trial_count: int, total_trials: int
return "paused"
def _load_study_info(study_dir: Path, topic: Optional[str] = None) -> Optional[dict]:
    """Load study info from a study directory. Returns None if not a valid study.

    Args:
        study_dir: Path to a candidate study directory.
        topic: Name of the parent topic folder, or None for flat-structure studies.

    Returns:
        Dict with id/name/topic/status/progress/best_value/target/path/timestamps,
        or None when no optimization config is found.
    """
    # Look for optimization config (check multiple locations)
    config_file = study_dir / "optimization_config.json"
    if not config_file.exists():
        config_file = study_dir / "1_setup" / "optimization_config.json"
    if not config_file.exists():
        return None

    with open(config_file) as f:
        config = json.load(f)

    # Check if results directory exists (support both 2_results and 3_results)
    results_dir = study_dir / "2_results"
    if not results_dir.exists():
        results_dir = study_dir / "3_results"

    # Check for Optuna database (Protocol 10) or JSON history (other protocols)
    study_db = results_dir / "study.db"
    history_file = results_dir / "optimization_history_incremental.json"

    trial_count = 0
    best_value = None
    has_db = False

    if study_db.exists():
        # Protocol 10: read progress directly from the Optuna SQLite database.
        has_db = True
        try:
            # Use a timeout so a locked database doesn't block the listing.
            conn = sqlite3.connect(str(study_db), timeout=2.0)
            try:
                cursor = conn.cursor()
                cursor.execute("SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'")
                trial_count = cursor.fetchone()[0]
                if trial_count > 0:
                    # Best trial = lowest value (assumes minimization; for
                    # multi-objective this is the minimum over all values).
                    cursor.execute("""
                        SELECT value FROM trial_values
                        WHERE trial_id IN (
                            SELECT trial_id FROM trials WHERE state = 'COMPLETE'
                        )
                        ORDER BY value ASC
                        LIMIT 1
                    """)
                    result = cursor.fetchone()
                    if result:
                        best_value = result[0]
            finally:
                # Always release the handle, even if a query above raised.
                conn.close()
        except Exception as e:
            print(f"Warning: Failed to read Optuna database for {study_dir.name}: {e}")
    elif history_file.exists():
        # Legacy protocols: read from the incremental JSON history.
        has_db = True
        with open(history_file) as f:
            history = json.load(f)
        trial_count = len(history)
        if history:
            best_trial = min(history, key=lambda x: x['objective'])
            best_value = best_trial['objective']

    # Get total trials from config (supports multiple config formats)
    total_trials = (
        config.get('optimization_settings', {}).get('n_trials') or
        config.get('optimization', {}).get('n_trials') or
        config.get('trials', {}).get('n_trials', 50)
    )

    # Get accurate status using process detection
    status = get_accurate_study_status(study_dir.name, trial_count, total_trials, has_db)

    # Creation date: database mtime is considered most accurate, then the
    # config file, then the directory ctime. Narrow except: only filesystem
    # errors are expected here (bare except would also swallow KeyboardInterrupt).
    created_at = None
    try:
        if study_db.exists():
            created_at = datetime.fromtimestamp(study_db.stat().st_mtime).isoformat()
        elif config_file.exists():
            created_at = datetime.fromtimestamp(config_file.stat().st_mtime).isoformat()
        else:
            created_at = datetime.fromtimestamp(study_dir.stat().st_ctime).isoformat()
    except OSError:
        created_at = None

    # Last modified time from whichever results artifact exists.
    last_modified = None
    try:
        if study_db.exists():
            last_modified = datetime.fromtimestamp(study_db.stat().st_mtime).isoformat()
        elif history_file.exists():
            last_modified = datetime.fromtimestamp(history_file.stat().st_mtime).isoformat()
    except OSError:
        last_modified = None

    return {
        "id": study_dir.name,
        "name": study_dir.name.replace("_", " ").title(),
        "topic": topic,  # topic field for frontend grouping
        "status": status,
        "progress": {
            "current": trial_count,
            "total": total_trials
        },
        "best_value": best_value,
        "target": config.get('target', {}).get('value'),
        "path": str(study_dir),
        "created_at": created_at,
        "last_modified": last_modified
    }
@router.get("/studies")
async def list_studies():
"""List all available optimization studies"""
"""List all available optimization studies.
Supports both flat and nested folder structures:
- Flat: studies/study_name/
- Nested: studies/Topic/study_name/
Returns studies with 'topic' field for frontend grouping.
"""
try:
studies = []
if not STUDIES_DIR.exists():
return {"studies": []}
for study_dir in STUDIES_DIR.iterdir():
if not study_dir.is_dir():
for item in STUDIES_DIR.iterdir():
if not item.is_dir():
continue
if item.name.startswith('.'):
continue
# Look for optimization config (check multiple locations)
config_file = study_dir / "optimization_config.json"
if not config_file.exists():
config_file = study_dir / "1_setup" / "optimization_config.json"
if not config_file.exists():
continue
# Check if this is a study (flat structure) or a topic folder (nested structure)
is_study = (item / "1_setup").exists() or (item / "optimization_config.json").exists()
# Load config
with open(config_file) as f:
config = json.load(f)
if is_study:
# Flat structure: study directly in studies/
study_info = _load_study_info(item, topic=None)
if study_info:
studies.append(study_info)
else:
# Nested structure: this might be a topic folder
# Check if it contains study subdirectories
for sub_item in item.iterdir():
if not sub_item.is_dir():
continue
if sub_item.name.startswith('.'):
continue
# Check if results directory exists (support both 2_results and 3_results)
results_dir = study_dir / "2_results"
if not results_dir.exists():
results_dir = study_dir / "3_results"
# Check for Optuna database (Protocol 10) or JSON history (other protocols)
study_db = results_dir / "study.db"
history_file = results_dir / "optimization_history_incremental.json"
trial_count = 0
best_value = None
has_db = False
# Protocol 10: Read from Optuna SQLite database
if study_db.exists():
has_db = True
try:
# Use timeout to avoid blocking on locked databases
conn = sqlite3.connect(str(study_db), timeout=2.0)
cursor = conn.cursor()
# Get trial count and status
cursor.execute("SELECT COUNT(*) FROM trials WHERE state = 'COMPLETE'")
trial_count = cursor.fetchone()[0]
# Get best trial (for single-objective, or first objective for multi-objective)
if trial_count > 0:
cursor.execute("""
SELECT value FROM trial_values
WHERE trial_id IN (
SELECT trial_id FROM trials WHERE state = 'COMPLETE'
)
ORDER BY value ASC
LIMIT 1
""")
result = cursor.fetchone()
if result:
best_value = result[0]
conn.close()
except Exception as e:
print(f"Warning: Failed to read Optuna database for {study_dir.name}: {e}")
# Legacy: Read from JSON history
elif history_file.exists():
has_db = True
with open(history_file) as f:
history = json.load(f)
trial_count = len(history)
if history:
# Find best trial
best_trial = min(history, key=lambda x: x['objective'])
best_value = best_trial['objective']
# Get total trials from config (supports both formats)
total_trials = (
config.get('optimization_settings', {}).get('n_trials') or
config.get('trials', {}).get('n_trials', 50)
)
# Get accurate status using process detection
status = get_accurate_study_status(study_dir.name, trial_count, total_trials, has_db)
# Get creation date from directory or config modification time
created_at = None
try:
# First try to get from database (most accurate)
if study_db.exists():
created_at = datetime.fromtimestamp(study_db.stat().st_mtime).isoformat()
elif config_file.exists():
created_at = datetime.fromtimestamp(config_file.stat().st_mtime).isoformat()
else:
created_at = datetime.fromtimestamp(study_dir.stat().st_ctime).isoformat()
except:
created_at = None
# Get last modified time
last_modified = None
try:
if study_db.exists():
last_modified = datetime.fromtimestamp(study_db.stat().st_mtime).isoformat()
elif history_file.exists():
last_modified = datetime.fromtimestamp(history_file.stat().st_mtime).isoformat()
except:
last_modified = None
studies.append({
"id": study_dir.name,
"name": study_dir.name.replace("_", " ").title(),
"status": status,
"progress": {
"current": trial_count,
"total": total_trials
},
"best_value": best_value,
"target": config.get('target', {}).get('value'),
"path": str(study_dir),
"created_at": created_at,
"last_modified": last_modified
})
# Check if this subdirectory is a study
sub_is_study = (sub_item / "1_setup").exists() or (sub_item / "optimization_config.json").exists()
if sub_is_study:
study_info = _load_study_info(sub_item, topic=item.name)
if study_info:
studies.append(study_info)
return {"studies": studies}
@@ -225,7 +303,7 @@ async def list_studies():
async def get_study_status(study_id: str):
"""Get detailed status of a specific study"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -354,7 +432,7 @@ async def get_study_status(study_id: str):
async def get_optimization_history(study_id: str, limit: Optional[int] = None):
"""Get optimization history (all trials)"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
results_dir = get_results_dir(study_dir)
study_db = results_dir / "study.db"
history_file = results_dir / "optimization_history_incremental.json"
@@ -493,7 +571,7 @@ async def get_optimization_history(study_id: str, limit: Optional[int] = None):
async def get_pruning_history(study_id: str):
"""Get pruning diagnostics from Optuna database or legacy JSON file"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
results_dir = get_results_dir(study_dir)
study_db = results_dir / "study.db"
pruning_file = results_dir / "pruning_history.json"
@@ -593,7 +671,7 @@ def _infer_objective_unit(objective: Dict) -> str:
async def get_study_metadata(study_id: str):
"""Read optimization_config.json for objectives, design vars, units (Protocol 13)"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -631,7 +709,7 @@ async def get_study_metadata(study_id: str):
async def get_optimizer_state(study_id: str):
"""Read realtime optimizer state from intelligent_optimizer/ (Protocol 13)"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
results_dir = get_results_dir(study_dir)
state_file = results_dir / "intelligent_optimizer" / "optimizer_state.json"
@@ -652,7 +730,7 @@ async def get_optimizer_state(study_id: str):
async def get_pareto_front(study_id: str):
"""Get Pareto-optimal solutions for multi-objective studies (Protocol 13)"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
results_dir = get_results_dir(study_dir)
study_db = results_dir / "study.db"
@@ -696,7 +774,7 @@ async def get_pareto_front(study_id: str):
async def get_nn_pareto_front(study_id: str):
"""Get NN surrogate Pareto front from nn_pareto_front.json"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
results_dir = get_results_dir(study_dir)
nn_pareto_file = results_dir / "nn_pareto_front.json"
@@ -741,7 +819,7 @@ async def get_nn_pareto_front(study_id: str):
async def get_nn_optimization_state(study_id: str):
"""Get NN optimization state/summary from nn_optimization_state.json"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
results_dir = get_results_dir(study_dir)
nn_state_file = results_dir / "nn_optimization_state.json"
@@ -858,7 +936,7 @@ async def convert_study_mesh(study_id: str):
Creates a web-viewable 3D model with FEA results as vertex colors
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -896,7 +974,7 @@ async def get_mesh_file(study_id: str, filename: str):
if '..' in filename or '/' in filename or '\\' in filename:
raise HTTPException(status_code=400, detail="Invalid filename")
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
visualization_dir = study_dir / "3_visualization"
file_path = visualization_dir / filename
@@ -936,7 +1014,7 @@ async def get_optuna_dashboard_url(study_id: str):
sqlite:///studies/{study_id}/2_results/study.db
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -991,7 +1069,7 @@ async def generate_report(
Information about the generated report including download URL
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1048,7 +1126,7 @@ async def download_report(study_id: str, filename: str):
if '..' in filename or '/' in filename or '\\' in filename:
raise HTTPException(status_code=400, detail="Invalid filename")
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
results_dir = get_results_dir(study_dir)
file_path = results_dir / filename
@@ -1093,7 +1171,7 @@ async def get_console_output(study_id: str, lines: int = 200):
JSON with console output lines
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1158,7 +1236,7 @@ async def get_study_report(study_id: str):
JSON with the markdown content
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1200,7 +1278,7 @@ async def get_study_readme(study_id: str):
JSON with the markdown content
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1257,6 +1335,71 @@ async def get_study_readme(study_id: str):
raise HTTPException(status_code=500, detail=f"Failed to read README: {str(e)}")
@router.get("/studies/{study_id}/image/{image_path:path}")
async def get_study_image(study_id: str, image_path: str):
"""
Serve images from a study directory.
Supports images in:
- study_dir/image.png
- study_dir/1_setup/image.png
- study_dir/3_results/image.png
- study_dir/assets/image.png
Args:
study_id: Study identifier
image_path: Relative path to the image within the study
Returns:
FileResponse with the image
"""
try:
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
# Sanitize path to prevent directory traversal
image_path = image_path.replace('..', '').lstrip('/')
# Try multiple locations for the image
possible_paths = [
study_dir / image_path,
study_dir / "1_setup" / image_path,
study_dir / "3_results" / image_path,
study_dir / "2_results" / image_path,
study_dir / "assets" / image_path,
]
image_file = None
for path in possible_paths:
if path.exists() and path.is_file():
image_file = path
break
if image_file is None:
raise HTTPException(status_code=404, detail=f"Image not found: {image_path}")
# Determine media type
suffix = image_file.suffix.lower()
media_types = {
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.gif': 'image/gif',
'.svg': 'image/svg+xml',
'.webp': 'image/webp',
}
media_type = media_types.get(suffix, 'application/octet-stream')
return FileResponse(image_file, media_type=media_type)
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to serve image: {str(e)}")
@router.get("/studies/{study_id}/config")
async def get_study_config(study_id: str):
"""
@@ -1269,7 +1412,7 @@ async def get_study_config(study_id: str):
JSON with the complete configuration
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1306,7 +1449,7 @@ _running_processes: Dict[str, int] = {}
def _find_optimization_process(study_id: str) -> Optional[psutil.Process]:
"""Find a running optimization process for a given study"""
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
for proc in psutil.process_iter(['pid', 'name', 'cmdline', 'cwd']):
try:
@@ -1335,7 +1478,7 @@ async def get_process_status(study_id: str):
JSON with process status (is_running, pid, iteration counts)
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1430,7 +1573,7 @@ async def start_optimization(study_id: str, request: StartOptimizationRequest =
JSON with process info
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1505,7 +1648,7 @@ async def stop_optimization(study_id: str, request: StopRequest = None):
request = StopRequest()
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1606,7 +1749,7 @@ async def validate_optimization(study_id: str, request: ValidateRequest = None):
JSON with process info
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1684,7 +1827,7 @@ async def launch_optuna_dashboard(study_id: str):
return s.connect_ex(('localhost', port)) == 0
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1717,18 +1860,13 @@ async def launch_optuna_dashboard(study_id: str):
"message": "Optuna dashboard already running on port 8081"
}
# Launch optuna-dashboard using Python script
python_exe = sys.executable
# Launch optuna-dashboard using CLI command (more robust than Python import)
# Use absolute path with POSIX format for SQLite URL
abs_db_path = study_db.absolute().as_posix()
storage_url = f"sqlite:///{abs_db_path}"
# Create a small Python script to run optuna-dashboard
launch_script = f'''
from optuna_dashboard import run_server
run_server("{storage_url}", host="0.0.0.0", port={port})
'''
cmd = [python_exe, "-c", launch_script]
# Use optuna-dashboard CLI command directly
cmd = ["optuna-dashboard", storage_url, "--port", str(port), "--host", "0.0.0.0"]
# On Windows, use CREATE_NEW_PROCESS_GROUP and DETACHED_PROCESS flags
import platform
@@ -1817,7 +1955,7 @@ async def get_model_files(study_id: str):
JSON with list of model files and their paths
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1896,7 +2034,7 @@ async def open_model_folder(study_id: str, folder_type: str = "model"):
import platform
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -1948,7 +2086,7 @@ async def open_model_folder(study_id: str, folder_type: str = "model"):
async def get_best_solution(study_id: str):
"""Get the best trial(s) for a study with improvement metrics"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")
@@ -2081,7 +2219,7 @@ async def get_study_runs(study_id: str):
This endpoint returns metrics for each sub-study.
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")
@@ -2191,7 +2329,7 @@ async def update_study_config(study_id: str, request: UpdateConfigRequest):
JSON with success status
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study {study_id} not found")
@@ -2245,7 +2383,7 @@ async def get_zernike_available_trials(study_id: str):
JSON with list of trial numbers that have iteration folders with OP2 files
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")
@@ -2301,7 +2439,7 @@ async def get_trial_zernike(study_id: str, trial_number: int):
JSON with HTML content for each comparison, or error if OP2 not found
"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")
@@ -2346,10 +2484,11 @@ async def get_trial_zernike(study_id: str, trial_number: int):
# Configuration
N_MODES = 50
AMP = 50.0 # Increased from 2.0 for better visibility
PANCAKE = 5.0 # Reduced from 10.0 for more Z range
AMP = 0.5 # Reduced deformation scaling (0.5x)
PANCAKE = 3.0 # Z-axis range multiplier
PLOT_DOWNSAMPLE = 5000 # Reduced for faster loading
FILTER_LOW_ORDERS = 4
COLORSCALE = 'Turbo' # Colorscale: 'RdBu_r', 'Viridis', 'Plasma', 'Turbo'
SUBCASE_MAP = {
'1': '90', '2': '20', '3': '40', '4': '60',
@@ -2465,7 +2604,7 @@ async def get_trial_zernike(study_id: str, trial_number: int):
res_amp = AMP * Wp
max_amp = float(np.max(np.abs(res_amp))) if res_amp.size else 1.0
# Create SURFACE mesh (not just points)
# Create smooth shaded SURFACE mesh with lighting
surface_trace = None
try:
tri = Triangulation(Xp, Yp)
@@ -2475,11 +2614,25 @@ async def get_trial_zernike(study_id: str, trial_number: int):
x=Xp.tolist(), y=Yp.tolist(), z=res_amp.tolist(),
i=i_idx.tolist(), j=j_idx.tolist(), k=k_idx.tolist(),
intensity=res_amp.tolist(),
colorscale='RdBu',
opacity=0.95,
flatshading=True,
colorscale=COLORSCALE,
opacity=1.0,
flatshading=False, # Smooth shading
lighting=dict(
ambient=0.4,
diffuse=0.8,
specular=0.3,
roughness=0.5,
fresnel=0.2
),
lightposition=dict(x=100, y=200, z=300),
showscale=True,
colorbar=dict(title="Residual (nm)", titleside='right', len=0.6)
colorbar=dict(
title=dict(text="Residual (nm)", side='right'),
thickness=15,
len=0.6,
tickformat=".1f"
),
hovertemplate="X: %{x:.1f}<br>Y: %{y:.1f}<br>Residual: %{z:.2f} nm<extra></extra>"
)
except Exception as e:
print(f"Triangulation failed: {e}")
@@ -2534,14 +2687,39 @@ async def get_trial_zernike(study_id: str, trial_number: int):
fig.add_trace(go.Scatter3d(
x=Xp.tolist(), y=Yp.tolist(), z=res_amp.tolist(),
mode='markers',
marker=dict(size=2, color=res_amp.tolist(), colorscale='RdBu', showscale=True),
marker=dict(size=2, color=res_amp.tolist(), colorscale=COLORSCALE, showscale=True),
showlegend=False
), row=1, col=1)
fig.update_scenes(
camera=dict(eye=dict(x=0.8, y=0.8, z=0.6)),
zaxis=dict(range=[-max_amp * PANCAKE, max_amp * PANCAKE]),
aspectmode='data'
camera=dict(
eye=dict(x=1.2, y=1.2, z=0.8),
up=dict(x=0, y=0, z=1)
),
xaxis=dict(
title="X (mm)",
showgrid=True,
gridcolor='rgba(128,128,128,0.3)',
showbackground=True,
backgroundcolor='rgba(240,240,240,0.9)'
),
yaxis=dict(
title="Y (mm)",
showgrid=True,
gridcolor='rgba(128,128,128,0.3)',
showbackground=True,
backgroundcolor='rgba(240,240,240,0.9)'
),
zaxis=dict(
title="Residual (nm)",
range=[-max_amp * PANCAKE, max_amp * PANCAKE],
showgrid=True,
gridcolor='rgba(128,128,128,0.3)',
showbackground=True,
backgroundcolor='rgba(230,230,250,0.9)'
),
aspectmode='manual',
aspectratio=dict(x=1, y=1, z=0.4)
)
# Row 2: RMS table with all metrics
@@ -2805,7 +2983,7 @@ async def get_trial_zernike(study_id: str, trial_number: int):
async def export_study_data(study_id: str, format: str):
"""Export study data in various formats: csv, json, excel"""
try:
study_dir = STUDIES_DIR / study_id
study_dir = resolve_study_path(study_id)
if not study_dir.exists():
raise HTTPException(status_code=404, detail=f"Study '{study_id}' not found")