feat: Add dashboard chat integration and MCP server

Major changes:
- Dashboard: WebSocket-based chat with session management
- Dashboard: New chat components (ChatPane, ChatInput, ModeToggle)
- Dashboard: Enhanced UI with parallel coordinates chart
- MCP Server: New atomizer-tools server for Claude integration
- Extractors: Enhanced Zernike OPD extractor
- Reports: Improved report generator

New studies (configs and scripts only):
- M1 Mirror: Cost reduction campaign studies
- Simple Beam, Simple Bracket, UAV Arm studies

Note: Large iteration data (2_iterations/, best_design_archive/)
excluded via .gitignore - kept on local Gitea only.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-13 15:53:55 -05:00
parent 69c0d76b50
commit 73a7b9d9f1
1680 changed files with 144922 additions and 723 deletions

View File

@@ -45,11 +45,18 @@ export interface ConfigResponse {
// Live status of an optimization run as reported by the backend.
// Everything except `is_running` is optional: the process may not have
// started yet, or the backend may predate the newer progress fields.
export interface ProcessStatus {
  // Whether the optimization process is currently running.
  is_running: boolean;
  // True while the run is paused (see pauseOptimization/resumeOptimization).
  is_paused?: boolean;
  // OS process id of the optimizer, when known.
  pid?: number;
  // Run start timestamp; string format not shown here — presumably ISO 8601, confirm against backend.
  start_time?: string;
  // Current optimizer iteration.
  iteration?: number;
  // NOTE(review): presumably counts of completed FEA and neural-network
  // evaluations — verify against the backend that populates them.
  fea_count?: number;
  nn_count?: number;
  // Trial progress for trial-based runs (see the `trials` option on startOptimization).
  total_trials?: number;
  completed_trials?: number;
  // Server-computed throughput and ETA estimates.
  time_per_trial_seconds?: number;
  eta_seconds?: number;
  // Presumably a human-readable rendering of eta_seconds — confirm.
  eta_formatted?: string;
  rate_per_hour?: number;
}
export interface ModelFile {
@@ -144,6 +151,7 @@ class ApiClient {
async startOptimization(studyId: string, options?: {
freshStart?: boolean;
maxIterations?: number;
trials?: number; // For SAT scripts
feaBatchSize?: number;
tuneTrials?: number;
ensembleSize?: number;
@@ -172,6 +180,28 @@ class ApiClient {
return response.json();
}
/**
 * Pause a running optimization for the given study.
 *
 * Sends POST /optimization/studies/{studyId}/pause.
 *
 * @param studyId - Identifier of the study whose run should be paused.
 * @returns The backend's control response (parsed JSON).
 * @throws Error carrying the backend-supplied `detail` message when the
 *   request fails, or a generic message when no detail is available.
 */
async pauseOptimization(studyId: string): Promise<OptimizationControlResponse> {
  const response = await fetch(`${API_BASE}/optimization/studies/${studyId}/pause`, {
    method: 'POST',
  });
  if (!response.ok) {
    // Fix: a non-JSON error body (e.g. an HTML error page from a proxy, or
    // an empty body) previously made response.json() throw a raw
    // SyntaxError, masking the intended error message. Fall back to an
    // empty object so the generic message is thrown instead.
    const error = await response.json().catch(() => ({}));
    throw new Error(error.detail || 'Failed to pause optimization');
  }
  return response.json();
}
/**
 * Resume a paused optimization for the given study.
 *
 * Sends POST /optimization/studies/{studyId}/resume.
 *
 * @param studyId - Identifier of the study whose run should be resumed.
 * @returns The backend's control response (parsed JSON).
 * @throws Error carrying the backend-supplied `detail` message when the
 *   request fails, or a generic message when no detail is available.
 */
async resumeOptimization(studyId: string): Promise<OptimizationControlResponse> {
  const response = await fetch(`${API_BASE}/optimization/studies/${studyId}/resume`, {
    method: 'POST',
  });
  if (!response.ok) {
    // Fix: a non-JSON error body previously made response.json() throw a
    // raw SyntaxError, masking the intended error message. Fall back to an
    // empty object so the generic message is thrown instead.
    const error = await response.json().catch(() => ({}));
    throw new Error(error.detail || 'Failed to resume optimization');
  }
  return response.json();
}
async validateOptimization(studyId: string, options?: {
topN?: number;
}): Promise<OptimizationControlResponse> {
@@ -210,6 +240,57 @@ class ApiClient {
return response.json();
}
async checkOptunaAvailable(): Promise<{
available: boolean;
path: string | null;
message: string;
install_instructions?: string;
}> {
const response = await fetch(`${API_BASE}/optimization/optuna-dashboard/check`);
if (!response.ok) {
return {
available: false,
path: null,
message: 'Failed to check optuna-dashboard availability',
install_instructions: 'pip install optuna-dashboard'
};
}
return response.json();
}
// Optimizer state
async getOptimizerState(studyId: string): Promise<{
available: boolean;
source?: string;
phase?: string;
phase_description?: string;
phase_progress?: number;
current_strategy?: string;
sampler?: {
name: string;
description: string;
};
objectives?: Array<{
name: string;
direction: string;
current_best?: number;
unit?: string;
}>;
plan?: {
total_phases: number;
current_phase: number;
phases: string[];
};
completed_trials?: number;
total_trials?: number;
}> {
const response = await fetch(`${API_BASE}/optimization/studies/${studyId}/optimizer-state`);
if (!response.ok) {
return { available: false };
}
return response.json();
}
// Model files
async getModelFiles(studyId: string): Promise<ModelFilesResponse> {
const response = await fetch(`${API_BASE}/optimization/studies/${studyId}/model-files`);