feat: Major update with validators, skills, dashboard, and docs reorganization
- Add validation framework (config, model, results, study validators) - Add Claude Code skills (create-study, run-optimization, generate-report, troubleshoot, analyze-model) - Add Atomizer Dashboard (React frontend + FastAPI backend) - Reorganize docs into structured directories (00-09) - Add neural surrogate modules and training infrastructure - Add multi-objective optimization support 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
421
optimization_engine/validators/study_validator.py
Normal file
421
optimization_engine/validators/study_validator.py
Normal file
@@ -0,0 +1,421 @@
|
||||
"""
|
||||
Study Validator for Atomizer Optimization Studies
|
||||
|
||||
Comprehensive validation that combines config, model, and results validation
|
||||
to provide a complete health check for an optimization study.
|
||||
|
||||
Usage:
|
||||
from optimization_engine.validators.study_validator import validate_study
|
||||
|
||||
result = validate_study("uav_arm_optimization")
|
||||
print(result) # Shows complete status with all checks
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any, Optional
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class StudyStatus(Enum):
    """Lifecycle state of an optimization study, from missing folder to finished run."""
    NOT_FOUND = "not_found"              # study folder does not exist
    SETUP_INCOMPLETE = "setup_incomplete"  # folder exists but setup checks failed
    READY_TO_RUN = "ready_to_run"        # all setup checks passed, no results yet
    RUNNING = "running"                  # results present and lock file detected
    COMPLETED = "completed"              # valid results, no lock file
    HAS_ERRORS = "has_errors"            # results exist but failed validation
|
||||
|
||||
|
||||
@dataclass
class StudyCheckResult:
    """Outcome of one named validation check within a study health report."""
    name: str      # human-readable title of the check
    passed: bool   # True when the check succeeded
    message: str   # short status text or failure explanation
    # Optional structured extras (counts, file flags, ...); fresh dict per instance.
    details: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
class StudyValidationResult:
    """Complete validation result for a study: overall status plus per-check detail."""
    study_name: str                 # study folder name that was validated
    status: StudyStatus             # resolved overall state
    checks: List[StudyCheckResult]  # individual check outcomes, in run order
    summary: Dict[str, Any]         # headline figures (variables, trials, ...)

    @property
    def is_ready_to_run(self) -> bool:
        """True when the study can be (re-)run: ready or already completed."""
        return self.status in (StudyStatus.READY_TO_RUN, StudyStatus.COMPLETED)

    @property
    def error_count(self) -> int:
        """Number of checks that did not pass."""
        return sum(1 for chk in self.checks if not chk.passed)

    @property
    def warning_count(self) -> int:
        """Number of passed checks whose message mentions a warning."""
        return sum(1 for chk in self.checks
                   if chk.passed and 'warning' in chk.message.lower())

    def __str__(self) -> str:
        """Render a human-readable multi-section validation report."""
        out: List[str] = []
        push = out.append

        # Header banner
        push("=" * 60)
        push(f"STUDY VALIDATION: {self.study_name}")
        push("=" * 60)
        push("")

        # Overall status line with a text icon per state
        icons = {
            StudyStatus.NOT_FOUND: "[X] NOT FOUND",
            StudyStatus.SETUP_INCOMPLETE: "[!] SETUP INCOMPLETE",
            StudyStatus.READY_TO_RUN: "[OK] READY TO RUN",
            StudyStatus.RUNNING: "[...] RUNNING",
            StudyStatus.COMPLETED: "[OK] COMPLETED",
            StudyStatus.HAS_ERRORS: "[X] HAS ERRORS"
        }
        push(f"Status: {icons.get(self.status, str(self.status))}")
        push("")

        # Key/value summary section (only when there is something to show)
        if self.summary:
            push("SUMMARY")
            push("-" * 40)
            out.extend(f" {key}: {value}" for key, value in self.summary.items())
            push("")

        # One line per check; failed or detailed checks also show their message
        push("VALIDATION CHECKS")
        push("-" * 40)
        for chk in self.checks:
            marker = "[OK]" if chk.passed else "[X]"
            push(f" {marker} {chk.name}")
            if not chk.passed or chk.details:
                push(f" {chk.message}")
        push("")

        # Final actionable verdict keyed on overall status
        if self.status == StudyStatus.READY_TO_RUN:
            push("Ready to run optimization!")
            push(" Command: python run_optimization.py --trials 30")
        elif self.status == StudyStatus.COMPLETED:
            push("Optimization completed. View results:")
            push(" Command: python -m optimization_engine.validators.results_validator " + self.study_name)
        elif self.status == StudyStatus.SETUP_INCOMPLETE:
            push("Complete setup before running:")
            out.extend(f" - Fix: {chk.message}" for chk in self.checks if not chk.passed)
        elif self.status == StudyStatus.HAS_ERRORS:
            push("Fix errors before continuing:")
            out.extend(f" - {chk.message}" for chk in self.checks if not chk.passed)

        return "\n".join(out)
|
||||
|
||||
|
||||
def validate_study(study_name: str, studies_dir: str = "studies") -> StudyValidationResult:
    """
    Validate all aspects of an optimization study.

    Runs six checks (folder, directory structure, config, model files,
    run script, results) and resolves an overall StudyStatus.

    Args:
        study_name: Name of the study folder
        studies_dir: Base directory for studies (default: "studies")

    Returns:
        StudyValidationResult with complete validation status
    """
    checks: List[StudyCheckResult] = []
    summary: Dict[str, Any] = {}

    study_path = Path(studies_dir) / study_name

    # Check 1: Study folder exists — everything else depends on it, so bail early.
    if not study_path.exists():
        checks.append(StudyCheckResult(
            name="Study folder exists",
            passed=False,
            message=f"Study folder not found: {study_path}"
        ))
        return StudyValidationResult(
            study_name=study_name,
            status=StudyStatus.NOT_FOUND,
            checks=checks,
            summary=summary
        )
    checks.append(StudyCheckResult(
        name="Study folder exists",
        passed=True,
        message="OK"
    ))

    setup_dir = study_path / "1_setup"
    results_dir = study_path / "2_results"
    model_dir = setup_dir / "model"
    config_path = setup_dir / "optimization_config.json"

    # Checks 2-5: each is a single-purpose helper; summary is filled in-place.
    checks.append(_check_structure(setup_dir, model_dir))
    checks.append(_check_config(config_path, summary))
    checks.append(_check_model(model_dir, study_name, studies_dir, summary))
    checks.append(_check_run_script(study_path))

    # Check 6: Results (if any) — absence of results is fine for a new study.
    db_path = results_dir / "study.db"
    has_results = db_path.exists()
    if has_results:
        results_check = _check_results(db_path, config_path, summary)
    else:
        results_check = StudyCheckResult(
            name="Optimization results",
            passed=True,  # Not having results is OK for a new study
            message="No results yet (study not run)",
            details={"exists": False}
        )
    checks.append(results_check)
    # Results are "valid" only when they exist AND passed validation.
    results_valid = has_results and results_check.passed

    status = _overall_status(checks, has_results, results_valid, results_dir)
    return StudyValidationResult(
        study_name=study_name,
        status=status,
        checks=checks,
        summary=summary
    )


def _check_structure(setup_dir: Path, model_dir: Path) -> StudyCheckResult:
    """Check 2: required directory layout (1_setup/ and 1_setup/model/)."""
    missing: List[str] = []
    if not setup_dir.exists():
        missing.append("Missing 1_setup/")
    if not model_dir.exists():
        missing.append("Missing 1_setup/model/")
    ok = not missing
    return StudyCheckResult(
        name="Directory structure",
        passed=ok,
        message="OK" if ok else f"Missing: {', '.join(missing)}"
    )


def _check_config(config_path: Path, summary: Dict[str, Any]) -> StudyCheckResult:
    """Check 3: optimization_config.json exists and validates.

    Side effect: writes design_variables/objectives/constraints counts into summary.
    """
    if not config_path.exists():
        return StudyCheckResult(
            name="Configuration file",
            passed=False,
            message="optimization_config.json not found"
        )
    # Imported lazily (as in the original) so a missing validator module
    # only fails when a config is actually present.
    from .config_validator import validate_config_file
    result = validate_config_file(str(config_path))
    details = {
        "errors": len(result.errors),
        "warnings": len(result.warnings)
    }
    summary["design_variables"] = len(result.config.get("design_variables", []))
    summary["objectives"] = len(result.config.get("objectives", []))
    summary["constraints"] = len(result.config.get("constraints", []))

    if result.is_valid:
        msg = "Configuration valid"
        if result.warnings:
            msg += f" ({len(result.warnings)} warnings)"
    else:
        msg = f"{len(result.errors)} errors"
    return StudyCheckResult(
        name="Configuration file",
        passed=result.is_valid,
        message=msg,
        details=details
    )


def _check_model(model_dir: Path, study_name: str, studies_dir: str,
                 summary: Dict[str, Any]) -> StudyCheckResult:
    """Check 4: model files under 1_setup/model/ (delegates to model_validator).

    Side effect: writes model_name into summary when known.
    """
    if not model_dir.exists():
        return StudyCheckResult(
            name="Model files",
            passed=False,
            message="Model directory not found"
        )
    from .model_validator import validate_study_model
    result = validate_study_model(study_name, studies_dir)
    details = {
        "prt": result.prt_file is not None,
        "sim": result.sim_file is not None,
        "fem": result.fem_file is not None
    }
    if result.model_name:
        summary["model_name"] = result.model_name

    if result.is_valid:
        msg = "Model files valid"
        if result.warnings:
            msg += f" ({len(result.warnings)} warnings)"
    else:
        msg = f"{len(result.errors)} errors"
    return StudyCheckResult(
        name="Model files",
        passed=result.is_valid,
        message=msg,
        details=details
    )


def _check_run_script(study_path: Path) -> StudyCheckResult:
    """Check 5: run_optimization.py is present in the study folder."""
    exists = (study_path / "run_optimization.py").exists()
    return StudyCheckResult(
        name="Run script",
        passed=exists,
        message="OK" if exists else "run_optimization.py not found"
    )


def _check_results(db_path: Path, config_path: Path,
                   summary: Dict[str, Any]) -> StudyCheckResult:
    """Check 6: validate an existing study.db against the config (if present).

    Side effect: writes trial/pareto counts into summary.
    """
    from .results_validator import validate_results
    result = validate_results(
        str(db_path),
        str(config_path) if config_path.exists() else None
    )
    info = result.info
    details = {
        "trials": info.n_trials,
        "completed": info.n_completed,
        "failed": info.n_failed,
        "pareto": info.n_pareto
    }
    summary["trials_completed"] = info.n_completed
    summary["trials_failed"] = info.n_failed
    if info.n_pareto > 0:
        summary["pareto_designs"] = info.n_pareto

    if result.is_valid:
        msg = f"{info.n_completed} completed trials"
        if info.is_multi_objective:
            msg += f", {info.n_pareto} Pareto-optimal"
    else:
        msg = f"{len(result.errors)} errors in results"
    return StudyCheckResult(
        name="Optimization results",
        passed=result.is_valid,
        message=msg,
        details=details
    )


def _overall_status(checks: List[StudyCheckResult], has_results: bool,
                    results_valid: bool, results_dir: Path) -> StudyStatus:
    """Resolve the overall StudyStatus from check outcomes and results state."""
    # First four checks are critical: folder, structure, config, model.
    if not all(c.passed for c in checks[:4]):
        return StudyStatus.SETUP_INCOMPLETE
    if has_results and results_valid:
        # A lock file indicates an optimization is still in progress.
        if (results_dir / ".optimization_lock").exists():
            return StudyStatus.RUNNING
        return StudyStatus.COMPLETED
    if has_results and not results_valid:
        return StudyStatus.HAS_ERRORS
    return StudyStatus.READY_TO_RUN
|
||||
|
||||
|
||||
def list_studies(studies_dir: str = "studies") -> List[Dict[str, Any]]:
    """
    List all studies and their validation status.

    Args:
        studies_dir: Base directory for studies

    Returns:
        List of dictionaries with study name and status
    """
    base = Path(studies_dir)
    if not base.exists():
        return []

    entries: List[Dict[str, Any]] = []
    for folder in sorted(base.iterdir()):
        # Skip files and hidden folders (e.g. .git, .cache).
        if not folder.is_dir() or folder.name.startswith('.'):
            continue
        report = validate_study(folder.name, studies_dir)
        entries.append({
            "name": folder.name,
            "status": report.status.value,
            "is_ready": report.is_ready_to_run,
            "errors": report.error_count,
            "trials": report.summary.get("trials_completed", 0),
            "pareto": report.summary.get("pareto_designs", 0)
        })
    return entries
|
||||
|
||||
|
||||
def quick_check(study_name: str, studies_dir: str = "studies") -> bool:
    """
    Quick check if a study is ready to run.

    Args:
        study_name: Name of the study
        studies_dir: Base directory for studies

    Returns:
        True if ready to run, False otherwise
    """
    # Thin convenience wrapper over the full validation.
    return validate_study(study_name, studies_dir).is_ready_to_run
|
||||
|
||||
|
||||
def get_study_health(study_name: str, studies_dir: str = "studies") -> Dict[str, Any]:
    """
    Get a simple health report for a study.

    Args:
        study_name: Name of the study
        studies_dir: Base directory for studies

    Returns:
        Dictionary with health information
    """
    report = validate_study(study_name, studies_dir)
    passed = sum(1 for chk in report.checks if chk.passed)

    return {
        "name": study_name,
        "status": report.status.value,
        "is_ready": report.is_ready_to_run,
        "checks_passed": passed,
        "checks_total": len(report.checks),
        "error_count": report.error_count,
        "summary": report.summary
    }
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import sys

    if len(sys.argv) >= 2:
        # Validate a single named study and print the full report.
        print(validate_study(sys.argv[1]))
    else:
        # No argument: show an overview of every study.
        print("Available studies:")
        print("-" * 60)
        studies = list_studies()
        if not studies:
            print(" No studies found in studies/")
        else:
            for entry in studies:
                icon = "[OK]" if entry["is_ready"] else "[X]"
                trials = f"{entry['trials']} trials" if entry['trials'] > 0 else "no trials"
                print(f" {icon} {entry['name']}: {entry['status']} ({trials})")
        print()
        print("Usage: python study_validator.py <study_name>")
|
||||
Reference in New Issue
Block a user