feat: Add Studio UI, intake system, and extractor improvements
Dashboard: - Add Studio page with drag-drop model upload and Claude chat - Add intake system for study creation workflow - Improve session manager and context builder - Add intake API routes and frontend components Optimization Engine: - Add CLI module for command-line operations - Add intake module for study preprocessing - Add validation module with gate checks - Improve Zernike extractor documentation - Update spec models with better validation - Enhance solve_simulation robustness Documentation: - Add ATOMIZER_STUDIO.md planning doc - Add ATOMIZER_UX_SYSTEM.md for UX patterns - Update extractor library docs - Add study-readme-generator skill Tools: - Add test scripts for extraction validation - Add Zernike recentering test Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
540
optimization_engine/intake/context.py
Normal file
540
optimization_engine/intake/context.py
Normal file
@@ -0,0 +1,540 @@
|
||||
"""
|
||||
Study Context
|
||||
=============
|
||||
|
||||
Complete assembled context for study creation, combining:
|
||||
- Model introspection results
|
||||
- Context files (goals.md, PDFs, images)
|
||||
- Pre-configuration (intake.yaml)
|
||||
- LAC memory (similar studies, recommendations)
|
||||
|
||||
This context object is used by both Interview Mode and Canvas Mode
|
||||
to provide intelligent suggestions and pre-filled values.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Dict, Any
|
||||
from enum import Enum
|
||||
import json
|
||||
|
||||
|
||||
class ConfidenceLevel(str, Enum):
    """Confidence level for suggestions.

    Inherits from ``str`` so members compare equal to, and serialize as,
    their plain string values ("high" / "medium" / "low").
    """

    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
||||
|
||||
|
||||
@dataclass
class ExpressionInfo:
    """Metadata about a single NX expression.

    Holds the expression's current value/units/formula plus heuristic
    flags marking whether it looks like a design-variable candidate.
    """

    name: str
    value: Optional[float] = None
    units: Optional[str] = None
    formula: Optional[str] = None
    type: str = "Number"
    is_design_candidate: bool = False
    confidence: ConfidenceLevel = ConfidenceLevel.MEDIUM
    reason: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (enum collapsed to its string value)."""
        return dict(
            name=self.name,
            value=self.value,
            units=self.units,
            formula=self.formula,
            type=self.type,
            is_design_candidate=self.is_design_candidate,
            confidence=self.confidence.value,
            reason=self.reason,
        )
|
||||
|
||||
@dataclass
class SolutionInfo:
    """Information about an NX solution."""

    # Solution name as defined in the simulation.
    name: str
    # Solver type identifier, e.g. "SOL 101", "SOL 103".
    type: str  # SOL 101, SOL 103, etc.
    # Optional free-text description of the solution.
    description: Optional[str] = None
|
||||
|
||||
@dataclass
class BoundaryConditionInfo:
    """Information about a boundary condition."""

    # Boundary-condition name as defined in the simulation.
    name: str
    # Constraint kind, e.g. "Fixed", "Pinned".
    type: str  # Fixed, Pinned, etc.
    # Optional description of where the BC is applied.
    location: Optional[str] = None
|
||||
|
||||
@dataclass
class LoadInfo:
    """Information about a load."""

    # Load name as defined in the simulation.
    name: str
    # Load kind, e.g. "Force", "Pressure".
    type: str  # Force, Pressure, etc.
    # Scalar magnitude of the load, if known.
    magnitude: Optional[float] = None
    # Units of the magnitude, if known.
    units: Optional[str] = None
    # Optional description of where the load is applied.
    location: Optional[str] = None
|
||||
|
||||
@dataclass
class MaterialInfo:
    """Information about a material in the model."""

    # Material name as defined in the model.
    name: str
    # Yield stress (units not recorded here -- presumably MPa, matching
    # BaselineResult.max_stress_mpa; confirm against introspection source).
    yield_stress: Optional[float] = None
    # Mass density, if known.
    density: Optional[float] = None
    # Young's modulus, if known.
    youngs_modulus: Optional[float] = None
|
||||
|
||||
@dataclass
class MeshInfo:
    """Information about the mesh."""

    # Total number of elements in the mesh.
    element_count: int = 0
    # Total number of nodes in the mesh.
    node_count: int = 0
    # Element type names present in the mesh.
    element_types: List[str] = field(default_factory=list)
    # Named mesh-quality metrics (metric name -> value).
    quality_metrics: Dict[str, float] = field(default_factory=dict)
|
||||
|
||||
@dataclass
class BaselineResult:
    """Results from the baseline solve of the unmodified model.

    All metric fields default to None, meaning "not captured"; ``success``
    and ``error`` record the outcome of the solve itself.
    """

    mass_kg: Optional[float] = None
    max_displacement_mm: Optional[float] = None
    max_stress_mpa: Optional[float] = None
    max_strain: Optional[float] = None
    first_frequency_hz: Optional[float] = None
    strain_energy_j: Optional[float] = None
    solve_time_seconds: Optional[float] = None
    success: bool = False
    error: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize every field to a JSON-friendly dict."""
        return dict(
            mass_kg=self.mass_kg,
            max_displacement_mm=self.max_displacement_mm,
            max_stress_mpa=self.max_stress_mpa,
            max_strain=self.max_strain,
            first_frequency_hz=self.first_frequency_hz,
            strain_energy_j=self.strain_energy_j,
            solve_time_seconds=self.solve_time_seconds,
            success=self.success,
            error=self.error,
        )

    def get_summary(self) -> str:
        """Return a short human-readable summary of the baseline results."""
        if not self.success:
            return f"Baseline solve failed: {self.error or 'Unknown error'}"

        # (format template, value) pairs; only metrics that were captured
        # appear in the summary line.
        metrics = (
            ("mass={:.2f}kg", self.mass_kg),
            ("disp={:.3f}mm", self.max_displacement_mm),
            ("stress={:.1f}MPa", self.max_stress_mpa),
            ("freq={:.1f}Hz", self.first_frequency_hz),
        )
        rendered = [template.format(value) for template, value in metrics if value is not None]
        return ", ".join(rendered) if rendered else "No results"
|
||||
|
||||
@dataclass
class IntrospectionData:
    """Complete introspection results from NX model.

    Aggregates everything learned from the model: expressions, solver
    setup (solutions, boundary conditions, loads, materials, mesh),
    result types available in the OP2, and the baseline solve outcome.
    ``to_dict``/``from_dict`` provide a JSON round-trip.
    """

    # Outcome of the introspection run itself.
    success: bool = False
    timestamp: Optional[datetime] = None
    error: Optional[str] = None

    # Part information
    expressions: List[ExpressionInfo] = field(default_factory=list)
    bodies: List[Dict[str, Any]] = field(default_factory=list)

    # Simulation information
    solutions: List[SolutionInfo] = field(default_factory=list)
    boundary_conditions: List[BoundaryConditionInfo] = field(default_factory=list)
    loads: List[LoadInfo] = field(default_factory=list)
    materials: List[MaterialInfo] = field(default_factory=list)
    mesh_info: Optional[MeshInfo] = None

    # Available result types (from OP2)
    available_results: Dict[str, bool] = field(default_factory=dict)
    subcases: List[int] = field(default_factory=list)

    # Baseline solve
    baseline: Optional[BaselineResult] = None

    def get_expression_names(self) -> List[str]:
        """Get list of all expression names."""
        return [e.name for e in self.expressions]

    def get_design_candidates(self) -> List[ExpressionInfo]:
        """Get expressions that look like design variables."""
        return [e for e in self.expressions if e.is_design_candidate]

    def get_expression(self, name: str) -> Optional[ExpressionInfo]:
        """Get expression by name, or None if not present."""
        for expr in self.expressions:
            if expr.name == name:
                return expr
        return None

    def get_solver_type(self) -> Optional[str]:
        """Get the primary solver type (SOL 101, etc.) from the first solution."""
        if self.solutions:
            return self.solutions[0].type
        return None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization.

        Note: ``bodies`` and ``mesh_info`` are not serialized.
        """
        return {
            "success": self.success,
            "timestamp": self.timestamp.isoformat() if self.timestamp else None,
            "error": self.error,
            "expressions": [e.to_dict() for e in self.expressions],
            "solutions": [{"name": s.name, "type": s.type} for s in self.solutions],
            "boundary_conditions": [
                {"name": bc.name, "type": bc.type} for bc in self.boundary_conditions
            ],
            "loads": [
                {"name": ld.name, "type": ld.type, "magnitude": ld.magnitude}
                for ld in self.loads
            ],
            "materials": [{"name": m.name, "yield_stress": m.yield_stress} for m in self.materials],
            "available_results": self.available_results,
            "subcases": self.subcases,
            "baseline": self.baseline.to_dict() if self.baseline else None,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "IntrospectionData":
        """Create from dictionary (inverse of ``to_dict``)."""
        introspection = cls(
            success=data.get("success", False),
            error=data.get("error"),
        )

        if data.get("timestamp"):
            introspection.timestamp = datetime.fromisoformat(data["timestamp"])

        # Parse expressions.
        # Fix: restore `reason`, which to_dict emits but was dropped on load.
        for expr_data in data.get("expressions", []):
            introspection.expressions.append(
                ExpressionInfo(
                    name=expr_data["name"],
                    value=expr_data.get("value"),
                    units=expr_data.get("units"),
                    formula=expr_data.get("formula"),
                    type=expr_data.get("type", "Number"),
                    is_design_candidate=expr_data.get("is_design_candidate", False),
                    confidence=ConfidenceLevel(expr_data.get("confidence", "medium")),
                    reason=expr_data.get("reason"),
                )
            )

        # Parse solutions
        for sol_data in data.get("solutions", []):
            introspection.solutions.append(
                SolutionInfo(
                    name=sol_data["name"],
                    type=sol_data["type"],
                )
            )

        # Parse boundary conditions, loads and materials.
        # Fix: to_dict emits all three, but they were previously lost on load.
        for bc_data in data.get("boundary_conditions", []):
            introspection.boundary_conditions.append(
                BoundaryConditionInfo(name=bc_data["name"], type=bc_data["type"])
            )
        for load_data in data.get("loads", []):
            introspection.loads.append(
                LoadInfo(
                    name=load_data["name"],
                    type=load_data["type"],
                    magnitude=load_data.get("magnitude"),
                )
            )
        for mat_data in data.get("materials", []):
            introspection.materials.append(
                MaterialInfo(
                    name=mat_data["name"],
                    yield_stress=mat_data.get("yield_stress"),
                )
            )

        introspection.available_results = data.get("available_results", {})
        introspection.subcases = data.get("subcases", [])

        # Parse baseline.
        # Fix: restore max_strain / first_frequency_hz / strain_energy_j,
        # which to_dict emits but were previously dropped on load.
        if data.get("baseline"):
            baseline_data = data["baseline"]
            introspection.baseline = BaselineResult(
                mass_kg=baseline_data.get("mass_kg"),
                max_displacement_mm=baseline_data.get("max_displacement_mm"),
                max_stress_mpa=baseline_data.get("max_stress_mpa"),
                max_strain=baseline_data.get("max_strain"),
                first_frequency_hz=baseline_data.get("first_frequency_hz"),
                strain_energy_j=baseline_data.get("strain_energy_j"),
                solve_time_seconds=baseline_data.get("solve_time_seconds"),
                success=baseline_data.get("success", False),
                error=baseline_data.get("error"),
            )

        return introspection
|
||||
|
||||
@dataclass
class DVSuggestion:
    """A suggested design variable, with provenance and confidence."""

    name: str
    current_value: Optional[float] = None
    suggested_bounds: Optional[tuple[float, float]] = None
    units: Optional[str] = None
    confidence: ConfidenceLevel = ConfidenceLevel.MEDIUM
    reason: str = ""
    source: str = "introspection"  # introspection, preconfig, lac
    lac_insight: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (bounds tuple becomes a list)."""
        bounds = self.suggested_bounds
        return dict(
            name=self.name,
            current_value=self.current_value,
            suggested_bounds=list(bounds) if bounds else None,
            units=self.units,
            confidence=self.confidence.value,
            reason=self.reason,
            source=self.source,
            lac_insight=self.lac_insight,
        )
|
||||
|
||||
@dataclass
class ObjectiveSuggestion:
    """Suggested optimization objective."""

    # Objective name.
    name: str
    # Optimization direction: "minimize" or "maximize".
    goal: str  # minimize, maximize
    # Name of the extractor that produces this objective's value.
    extractor: str
    # How confident the suggestion is.
    confidence: ConfidenceLevel = ConfidenceLevel.MEDIUM
    # Human-readable justification for the suggestion.
    reason: str = ""
    # Where the suggestion came from (default: the goals context file).
    source: str = "goals"
|
||||
|
||||
@dataclass
class ConstraintSuggestion:
    """Suggested optimization constraint."""

    # Constraint name.
    name: str
    # Comparison direction: "less_than" or "greater_than".
    type: str  # less_than, greater_than
    # Suggested limit value for the constraint, if known.
    suggested_threshold: Optional[float] = None
    # Units of the threshold, if known.
    units: Optional[str] = None
    # How confident the suggestion is.
    confidence: ConfidenceLevel = ConfidenceLevel.MEDIUM
    # Human-readable justification for the suggestion.
    reason: str = ""
    # Where the suggestion came from (default: the requirements context file).
    source: str = "requirements"
|
||||
|
||||
@dataclass
class ImageAnalysis:
    """Analysis result from Claude Vision for an image."""

    # Path to the analyzed image file.
    image_path: Path
    # Component type identified in the image, if any.
    component_type: Optional[str] = None
    # Dimensions called out in the image.
    dimensions: List[str] = field(default_factory=list)
    # Load conditions identified in the image.
    load_conditions: List[str] = field(default_factory=list)
    # Annotations found on the image.
    annotations: List[str] = field(default_factory=list)
    # Suggestions derived from the image.
    suggestions: List[str] = field(default_factory=list)
    # Full raw analysis text from the vision model.
    raw_analysis: Optional[str] = None
|
||||
|
||||
@dataclass
class LACInsight:
    """Insight from Learning Atomizer Core."""

    # Name of the similar prior study.
    study_name: str
    # Similarity to the current study (scale not defined here -- presumably
    # 0..1; confirm against the LAC scoring code).
    similarity_score: float
    # Geometry classification of the prior study.
    geometry_type: str
    # Optimization method the prior study used.
    method_used: str
    # Objectives the prior study optimized.
    objectives: List[str]
    # Trials the prior study needed to converge, if recorded.
    trials_to_convergence: Optional[int] = None
    # Whether the prior study succeeded.
    success: bool = True
    # Lesson learned from the prior study, if any.
    lesson: Optional[str] = None
|
||||
|
||||
@dataclass
class StudyContext:
    """
    Complete context for study creation.

    This is the central data structure that combines all information
    gathered during intake processing, ready for use by Interview Mode
    or Canvas Mode.

    ``save``/``load`` persist a subset of the context to JSON: identity,
    model file paths, introspection, goals/requirements text, suggested
    DVs, and warnings/errors. Image analyses, preconfig, LAC data, and
    objective/constraint suggestions are NOT persisted.
    """

    # === Identity ===
    study_name: str
    source_folder: Path
    created_at: datetime = field(default_factory=datetime.now)

    # === Model Files ===
    sim_file: Optional[Path] = None
    fem_file: Optional[Path] = None
    prt_file: Optional[Path] = None
    idealized_prt_file: Optional[Path] = None

    # === From Introspection ===
    introspection: Optional[IntrospectionData] = None

    # === From Context Files ===
    goals_text: Optional[str] = None
    requirements_text: Optional[str] = None
    constraints_text: Optional[str] = None
    notes_text: Optional[str] = None
    image_analyses: List[ImageAnalysis] = field(default_factory=list)

    # === From intake.yaml ===
    preconfig: Optional[Any] = None  # IntakeConfig, imported dynamically to avoid circular import

    # === From LAC ===
    similar_studies: List[LACInsight] = field(default_factory=list)
    recommended_method: Optional[str] = None
    known_issues: List[str] = field(default_factory=list)
    user_preferences: Dict[str, Any] = field(default_factory=dict)

    # === Derived Suggestions ===
    suggested_dvs: List[DVSuggestion] = field(default_factory=list)
    suggested_objectives: List[ObjectiveSuggestion] = field(default_factory=list)
    suggested_constraints: List[ConstraintSuggestion] = field(default_factory=list)

    # === Status ===
    warnings: List[str] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)

    @property
    def has_introspection(self) -> bool:
        """Check if introspection data is available and succeeded."""
        return self.introspection is not None and self.introspection.success

    @property
    def has_baseline(self) -> bool:
        """Check if successful baseline results are available."""
        return (
            self.introspection is not None
            and self.introspection.baseline is not None
            and self.introspection.baseline.success
        )

    @property
    def has_preconfig(self) -> bool:
        """Check if pre-configuration is available."""
        return self.preconfig is not None

    @property
    def ready_for_interview(self) -> bool:
        """Check if context is ready for interview mode."""
        return self.has_introspection and len(self.errors) == 0

    @property
    def ready_for_canvas(self) -> bool:
        """Check if context is ready for canvas mode."""
        return self.has_introspection and self.sim_file is not None

    def get_baseline_summary(self) -> str:
        """Get human-readable baseline summary."""
        # Single guard covers both missing-introspection and missing-baseline.
        if self.introspection is None or self.introspection.baseline is None:
            return "No baseline data"
        return self.introspection.baseline.get_summary()

    def get_missing_required(self) -> List[str]:
        """Get list of missing required items."""
        missing = []

        if self.sim_file is None:
            missing.append("Simulation file (.sim)")
        if not self.has_introspection:
            missing.append("Model introspection")

        return missing

    def get_context_summary(self) -> Dict[str, Any]:
        """Get a summary of loaded context for display."""
        return {
            "study_name": self.study_name,
            "has_model": self.sim_file is not None,
            "has_introspection": self.has_introspection,
            "has_baseline": self.has_baseline,
            "has_goals": self.goals_text is not None,
            "has_requirements": self.requirements_text is not None,
            "has_preconfig": self.has_preconfig,
            "num_expressions": len(self.introspection.expressions) if self.introspection else 0,
            "num_dv_candidates": len(self.introspection.get_design_candidates())
            if self.introspection
            else 0,
            "num_similar_studies": len(self.similar_studies),
            "warnings": self.warnings,
            "errors": self.errors,
        }

    def to_interview_context(self) -> Dict[str, Any]:
        """Get context formatted for interview mode."""
        return {
            "study_name": self.study_name,
            "baseline": (
                self.introspection.baseline.to_dict()
                if self.introspection is not None and self.introspection.baseline is not None
                else None
            ),
            "expressions": [e.to_dict() for e in self.introspection.expressions]
            if self.introspection
            else [],
            "design_candidates": [e.to_dict() for e in self.introspection.get_design_candidates()]
            if self.introspection
            else [],
            "solver_type": self.introspection.get_solver_type() if self.introspection else None,
            "goals_text": self.goals_text,
            "requirements_text": self.requirements_text,
            # preconfig is duck-typed; model_dump() implies a pydantic model.
            "preconfig": self.preconfig.model_dump() if self.preconfig else None,
            "suggested_dvs": [dv.to_dict() for dv in self.suggested_dvs],
            "similar_studies": [
                {"name": s.study_name, "method": s.method_used, "similarity": s.similarity_score}
                for s in self.similar_studies
            ],
            "recommended_method": self.recommended_method,
        }

    def save(self, output_path: Path) -> None:
        """Save context to JSON file (see class docstring for what persists)."""
        data = {
            "study_name": self.study_name,
            "source_folder": str(self.source_folder),
            "created_at": self.created_at.isoformat(),
            "sim_file": str(self.sim_file) if self.sim_file else None,
            "fem_file": str(self.fem_file) if self.fem_file else None,
            "prt_file": str(self.prt_file) if self.prt_file else None,
            # Fix: idealized_prt_file was previously not persisted.
            "idealized_prt_file": str(self.idealized_prt_file) if self.idealized_prt_file else None,
            "introspection": self.introspection.to_dict() if self.introspection else None,
            "goals_text": self.goals_text,
            "requirements_text": self.requirements_text,
            "suggested_dvs": [dv.to_dict() for dv in self.suggested_dvs],
            "warnings": self.warnings,
            "errors": self.errors,
        }

        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2)

    @classmethod
    def load(cls, input_path: Path) -> "StudyContext":
        """Load context from JSON file (inverse of ``save``)."""
        with open(input_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        context = cls(
            study_name=data["study_name"],
            source_folder=Path(data["source_folder"]),
            created_at=datetime.fromisoformat(data["created_at"]),
        )

        if data.get("sim_file"):
            context.sim_file = Path(data["sim_file"])
        if data.get("fem_file"):
            context.fem_file = Path(data["fem_file"])
        if data.get("prt_file"):
            context.prt_file = Path(data["prt_file"])
        if data.get("idealized_prt_file"):
            context.idealized_prt_file = Path(data["idealized_prt_file"])

        if data.get("introspection"):
            context.introspection = IntrospectionData.from_dict(data["introspection"])

        # Fix: suggested_dvs are written by save() but were previously
        # dropped on load, breaking the save/load round-trip.
        for dv_data in data.get("suggested_dvs", []):
            bounds = dv_data.get("suggested_bounds")
            context.suggested_dvs.append(
                DVSuggestion(
                    name=dv_data["name"],
                    current_value=dv_data.get("current_value"),
                    suggested_bounds=tuple(bounds) if bounds else None,
                    units=dv_data.get("units"),
                    confidence=ConfidenceLevel(dv_data.get("confidence", "medium")),
                    reason=dv_data.get("reason", ""),
                    source=dv_data.get("source", "introspection"),
                    lac_insight=dv_data.get("lac_insight"),
                )
            )

        context.goals_text = data.get("goals_text")
        context.requirements_text = data.get("requirements_text")
        context.warnings = data.get("warnings", [])
        context.errors = data.get("errors", [])

        return context
Reference in New Issue
Block a user