feat(canvas): Custom extractor components, migrator, and MCP spec tools
Canvas Components: - CustomExtractorNode.tsx: Node for custom Python extractors - CustomExtractorPanel.tsx: Configuration panel for custom extractors - ConnectionStatusIndicator.tsx: WebSocket status display - atomizer-spec.ts: TypeScript types for AtomizerSpec v2.0 Config: - migrator.py: Legacy config to AtomizerSpec v2.0 migration - Updated __init__.py exports for config and extractors MCP Tools: - spec.ts: MCP tools for spec manipulation - index.ts: Tool registration updates
This commit is contained in:
@@ -0,0 +1,49 @@
|
||||
/**
|
||||
* ConnectionStatusIndicator - Visual indicator for WebSocket connection status.
|
||||
*/
|
||||
|
||||
import { ConnectionStatus } from '../../hooks/useSpecWebSocket';
|
||||
|
||||
/** Props for {@link ConnectionStatusIndicator}. */
interface ConnectionStatusIndicatorProps {
  /** Current WebSocket connection state to visualize. */
  status: ConnectionStatus;
  /** Extra CSS classes appended to the wrapper element. */
  className?: string;
}
|
||||
|
||||
/**
|
||||
* Visual indicator for WebSocket connection status.
|
||||
* Can be used in the canvas UI to show sync state.
|
||||
*/
|
||||
export function ConnectionStatusIndicator({
|
||||
status,
|
||||
className = '',
|
||||
}: ConnectionStatusIndicatorProps) {
|
||||
const statusConfig = {
|
||||
disconnected: {
|
||||
color: 'bg-gray-500',
|
||||
label: 'Disconnected',
|
||||
},
|
||||
connecting: {
|
||||
color: 'bg-yellow-500 animate-pulse',
|
||||
label: 'Connecting...',
|
||||
},
|
||||
connected: {
|
||||
color: 'bg-green-500',
|
||||
label: 'Connected',
|
||||
},
|
||||
reconnecting: {
|
||||
color: 'bg-yellow-500 animate-pulse',
|
||||
label: 'Reconnecting...',
|
||||
},
|
||||
};
|
||||
|
||||
const config = statusConfig[status];
|
||||
|
||||
return (
|
||||
<div className={`flex items-center gap-2 ${className}`}>
|
||||
<div className={`w-2 h-2 rounded-full ${config.color}`} />
|
||||
<span className="text-xs text-dark-400">{config.label}</span>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default ConnectionStatusIndicator;
|
||||
@@ -0,0 +1,58 @@
|
||||
/**
|
||||
* CustomExtractorNode - Canvas node for custom Python extractors
|
||||
*
|
||||
* Displays custom extractors defined with inline Python code.
|
||||
* Visually distinct from builtin extractors with a code icon.
|
||||
*
|
||||
* P3.11: Custom extractor UI component
|
||||
*/
|
||||
|
||||
import { memo } from 'react';
|
||||
import { NodeProps } from 'reactflow';
|
||||
import { Code2 } from 'lucide-react';
|
||||
import { BaseNode } from './BaseNode';
|
||||
|
||||
/** Data payload carried by a custom-extractor canvas node. */
export interface CustomExtractorNodeData {
  /** Node type discriminator. */
  type: 'customExtractor';
  /** Label shown on the canvas node. */
  label: string;
  /** Persisted configured flag (the node also derives readiness from source/outputs). */
  configured: boolean;
  /** Id of the extractor in the spec. */
  extractorId?: string;
  /** Display name of the extractor. */
  extractorName?: string;
  /** Name of the Python entry function. */
  functionName?: string;
  /** Inline Python source of the extractor. */
  functionSource?: string;
  /** Output values the extractor produces. */
  outputs?: Array<{ name: string; units?: string }>;
  /** Dependency list — presumably Python package names; TODO confirm. */
  dependencies?: string[];
}
|
||||
|
||||
/**
 * Canvas node for a custom Python extractor.
 *
 * A node counts as configured once it has both non-empty source code and at
 * least one declared output; unconfigured nodes are dimmed and flagged.
 */
function CustomExtractorNodeComponent(props: NodeProps<CustomExtractorNodeData>) {
  const { data } = props;

  // Derive readiness from the node data rather than trusting the stored flag.
  const codePresent = Boolean(data.functionSource?.trim());
  const outputCount = data.outputs?.length ?? 0;
  const ready = codePresent && outputCount > 0;

  // Prefer the explicit extractor name, then the function name, then a generic label.
  const displayName = data.extractorName || data.functionName || 'Custom Extractor';

  return (
    <BaseNode
      {...props}
      icon={<Code2 size={16} />}
      iconColor={ready ? 'text-violet-400' : 'text-dark-500'}
    >
      <div className="flex flex-col">
        <span className={ready ? 'text-white' : 'text-dark-400'}>{displayName}</span>
        {!ready && (
          <span className="text-xs text-amber-400">Needs configuration</span>
        )}
        {ready && data.outputs && (
          <span className="text-xs text-dark-400">
            {data.outputs.length} output{data.outputs.length !== 1 ? 's' : ''}
          </span>
        )}
      </div>
    </BaseNode>
  );
}

export const CustomExtractorNode = memo(CustomExtractorNodeComponent);
|
||||
@@ -0,0 +1,360 @@
|
||||
/**
|
||||
* CustomExtractorPanel - Panel for editing custom Python extractors
|
||||
*
|
||||
* Provides a code editor for writing custom extraction functions,
|
||||
* output definitions, and validation.
|
||||
*
|
||||
* P3.12: Custom extractor UI component
|
||||
*/
|
||||
|
||||
import { useState, useCallback } from 'react';
|
||||
import { X, Play, AlertCircle, CheckCircle, Plus, Trash2, HelpCircle } from 'lucide-react';
|
||||
|
||||
/** One output value produced by a custom extractor. */
interface CustomExtractorOutput {
  /** Output name. */
  name: string;
  /** Physical units of the value, if any. */
  units?: string;
  /** Human-readable description. */
  description?: string;
}
|
||||
|
||||
/** Props for {@link CustomExtractorPanel}. */
interface CustomExtractorPanelProps {
  /** Whether the modal is visible; when false the panel renders nothing. */
  isOpen: boolean;
  /** Called when the user dismisses the panel (also invoked after a save). */
  onClose: () => void;
  /** Initial extractor display name. */
  initialName?: string;
  /** Initial Python function name (component defaults this to 'extract'). */
  initialFunctionName?: string;
  /** Initial Python source (component defaults this to the built-in template). */
  initialSource?: string;
  /** Initial output definitions. */
  initialOutputs?: CustomExtractorOutput[];
  /** Initial dependency list — presumably package names; TODO confirm. */
  initialDependencies?: string[];
  /** Receives the cleaned-up draft when the user presses "Save Extractor". */
  onSave: (data: {
    name: string;
    functionName: string;
    source: string;
    outputs: CustomExtractorOutput[];
    dependencies: string[];
  }) => void;
}
|
||||
|
||||
// Common styling classes
// Shared Tailwind class strings so the many inputs/labels below stay visually consistent.
const inputClass =
  'w-full px-3 py-2 bg-dark-800 border border-dark-600 text-white placeholder-dark-400 rounded-lg focus:border-primary-500 focus:outline-none transition-colors';
const labelClass = 'block text-sm font-medium text-dark-300 mb-1';
|
||||
|
||||
// Default extractor template
// Starter Python snippet pre-filled into the code editor for new extractors.
// NOTE(review): internal indentation reconstructed as standard 4-space Python
// indent — the source paste had lost leading whitespace; confirm against repo.
const DEFAULT_SOURCE = `def extract(op2_path, bdf_path=None, params=None, working_dir=None):
    """
    Custom extractor function.

    Args:
        op2_path: Path to the OP2 results file
        bdf_path: Optional path to the BDF model file
        params: Dictionary of current design parameters
        working_dir: Path to the current trial directory

    Returns:
        Dictionary of output_name -> value
        OR a single float value
        OR a list/tuple of values (mapped to outputs in order)
    """
    import numpy as np
    from pyNastran.op2.op2 import OP2

    # Load OP2 results
    op2 = OP2(op2_path, debug=False)

    # Example: compute custom metric
    # ... your extraction logic here ...

    result = 0.0

    return {"custom_output": result}
`;
|
||||
|
||||
export function CustomExtractorPanel({
|
||||
isOpen,
|
||||
onClose,
|
||||
initialName = '',
|
||||
initialFunctionName = 'extract',
|
||||
initialSource = DEFAULT_SOURCE,
|
||||
initialOutputs = [{ name: 'custom_output', units: '' }],
|
||||
initialDependencies = [],
|
||||
onSave,
|
||||
}: CustomExtractorPanelProps) {
|
||||
const [name, setName] = useState(initialName);
|
||||
const [functionName, setFunctionName] = useState(initialFunctionName);
|
||||
const [source, setSource] = useState(initialSource);
|
||||
const [outputs, setOutputs] = useState<CustomExtractorOutput[]>(initialOutputs);
|
||||
const [dependencies] = useState<string[]>(initialDependencies);
|
||||
const [validation, setValidation] = useState<{
|
||||
valid: boolean;
|
||||
errors: string[];
|
||||
} | null>(null);
|
||||
const [isValidating, setIsValidating] = useState(false);
|
||||
const [showHelp, setShowHelp] = useState(false);
|
||||
|
||||
// Add a new output
|
||||
const addOutput = useCallback(() => {
|
||||
setOutputs((prev) => [...prev, { name: '', units: '' }]);
|
||||
}, []);
|
||||
|
||||
// Remove an output
|
||||
const removeOutput = useCallback((index: number) => {
|
||||
setOutputs((prev) => prev.filter((_, i) => i !== index));
|
||||
}, []);
|
||||
|
||||
// Update an output
|
||||
const updateOutput = useCallback(
|
||||
(index: number, field: keyof CustomExtractorOutput, value: string) => {
|
||||
setOutputs((prev) =>
|
||||
prev.map((out, i) => (i === index ? { ...out, [field]: value } : out))
|
||||
);
|
||||
},
|
||||
[]
|
||||
);
|
||||
|
||||
// Validate the code
|
||||
const validateCode = useCallback(async () => {
|
||||
setIsValidating(true);
|
||||
setValidation(null);
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/spec/validate-extractor', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
function_name: functionName,
|
||||
source: source,
|
||||
}),
|
||||
});
|
||||
|
||||
const result = await response.json();
|
||||
setValidation({
|
||||
valid: result.valid,
|
||||
errors: result.errors || [],
|
||||
});
|
||||
} catch (error) {
|
||||
setValidation({
|
||||
valid: false,
|
||||
errors: ['Failed to validate: ' + (error instanceof Error ? error.message : 'Unknown error')],
|
||||
});
|
||||
} finally {
|
||||
setIsValidating(false);
|
||||
}
|
||||
}, [functionName, source]);
|
||||
|
||||
// Handle save
|
||||
const handleSave = useCallback(() => {
|
||||
// Filter out empty outputs
|
||||
const validOutputs = outputs.filter((o) => o.name.trim());
|
||||
|
||||
if (!name.trim()) {
|
||||
setValidation({ valid: false, errors: ['Name is required'] });
|
||||
return;
|
||||
}
|
||||
|
||||
if (validOutputs.length === 0) {
|
||||
setValidation({ valid: false, errors: ['At least one output is required'] });
|
||||
return;
|
||||
}
|
||||
|
||||
onSave({
|
||||
name: name.trim(),
|
||||
functionName: functionName.trim() || 'extract',
|
||||
source,
|
||||
outputs: validOutputs,
|
||||
dependencies: dependencies.filter((d) => d.trim()),
|
||||
});
|
||||
onClose();
|
||||
}, [name, functionName, source, outputs, dependencies, onSave, onClose]);
|
||||
|
||||
if (!isOpen) return null;
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 bg-black/50 flex items-center justify-center z-50">
|
||||
<div className="bg-dark-850 rounded-xl shadow-2xl w-[900px] max-h-[90vh] flex flex-col border border-dark-700">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-6 py-4 border-b border-dark-700">
|
||||
<h2 className="text-lg font-semibold text-white">Custom Extractor</h2>
|
||||
<div className="flex items-center gap-2">
|
||||
<button
|
||||
onClick={() => setShowHelp(!showHelp)}
|
||||
className="p-2 text-dark-400 hover:text-white hover:bg-dark-700 rounded-lg transition-colors"
|
||||
title="Show help"
|
||||
>
|
||||
<HelpCircle size={20} />
|
||||
</button>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="p-2 text-dark-400 hover:text-white hover:bg-dark-700 rounded-lg transition-colors"
|
||||
>
|
||||
<X size={20} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="flex-1 overflow-auto p-6">
|
||||
{/* Help Section */}
|
||||
{showHelp && (
|
||||
<div className="mb-4 p-4 bg-primary-900/20 border border-primary-700 rounded-lg">
|
||||
<h3 className="text-sm font-semibold text-primary-400 mb-2">How Custom Extractors Work</h3>
|
||||
<ul className="text-sm text-dark-300 space-y-1">
|
||||
<li>• Your function receives the path to OP2 results and optional BDF/params</li>
|
||||
<li>• Use pyNastran, numpy, scipy for data extraction and analysis</li>
|
||||
<li>• Return a dictionary mapping output names to numeric values</li>
|
||||
<li>• Outputs can be used as objectives or constraints in optimization</li>
|
||||
<li>• Code runs in a sandboxed environment (no file I/O beyond OP2/BDF)</li>
|
||||
</ul>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="grid grid-cols-2 gap-6">
|
||||
{/* Left Column - Basic Info & Outputs */}
|
||||
<div className="space-y-4">
|
||||
{/* Name */}
|
||||
<div>
|
||||
<label className={labelClass}>Extractor Name</label>
|
||||
<input
|
||||
type="text"
|
||||
value={name}
|
||||
onChange={(e) => setName(e.target.value)}
|
||||
placeholder="My Custom Extractor"
|
||||
className={inputClass}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Function Name */}
|
||||
<div>
|
||||
<label className={labelClass}>Function Name</label>
|
||||
<input
|
||||
type="text"
|
||||
value={functionName}
|
||||
onChange={(e) => setFunctionName(e.target.value)}
|
||||
placeholder="extract"
|
||||
className={`${inputClass} font-mono`}
|
||||
/>
|
||||
<p className="text-xs text-dark-500 mt-1">
|
||||
Name of the Python function in your code
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Outputs */}
|
||||
<div>
|
||||
<label className={labelClass}>Outputs</label>
|
||||
<div className="space-y-2">
|
||||
{outputs.map((output, index) => (
|
||||
<div key={index} className="flex gap-2">
|
||||
<input
|
||||
type="text"
|
||||
value={output.name}
|
||||
onChange={(e) => updateOutput(index, 'name', e.target.value)}
|
||||
placeholder="output_name"
|
||||
className={`${inputClass} font-mono flex-1`}
|
||||
/>
|
||||
<input
|
||||
type="text"
|
||||
value={output.units || ''}
|
||||
onChange={(e) => updateOutput(index, 'units', e.target.value)}
|
||||
placeholder="units"
|
||||
className={`${inputClass} w-24`}
|
||||
/>
|
||||
<button
|
||||
onClick={() => removeOutput(index)}
|
||||
className="p-2 text-red-400 hover:text-red-300 hover:bg-red-900/20 rounded-lg transition-colors"
|
||||
disabled={outputs.length === 1}
|
||||
>
|
||||
<Trash2 size={16} />
|
||||
</button>
|
||||
</div>
|
||||
))}
|
||||
<button
|
||||
onClick={addOutput}
|
||||
className="flex items-center gap-1 text-sm text-primary-400 hover:text-primary-300 transition-colors"
|
||||
>
|
||||
<Plus size={14} />
|
||||
Add Output
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Validation Status */}
|
||||
{validation && (
|
||||
<div
|
||||
className={`p-3 rounded-lg border ${
|
||||
validation.valid
|
||||
? 'bg-green-900/20 border-green-700'
|
||||
: 'bg-red-900/20 border-red-700'
|
||||
}`}
|
||||
>
|
||||
<div className="flex items-center gap-2">
|
||||
{validation.valid ? (
|
||||
<CheckCircle size={16} className="text-green-400" />
|
||||
) : (
|
||||
<AlertCircle size={16} className="text-red-400" />
|
||||
)}
|
||||
<span
|
||||
className={`text-sm font-medium ${
|
||||
validation.valid ? 'text-green-400' : 'text-red-400'
|
||||
}`}
|
||||
>
|
||||
{validation.valid ? 'Code is valid' : 'Validation failed'}
|
||||
</span>
|
||||
</div>
|
||||
{validation.errors.length > 0 && (
|
||||
<ul className="mt-2 text-sm text-red-300 space-y-1">
|
||||
{validation.errors.map((err, i) => (
|
||||
<li key={i}>• {err}</li>
|
||||
))}
|
||||
</ul>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Right Column - Code Editor */}
|
||||
<div className="space-y-2">
|
||||
<div className="flex items-center justify-between">
|
||||
<label className={labelClass}>Python Code</label>
|
||||
<button
|
||||
onClick={validateCode}
|
||||
disabled={isValidating}
|
||||
className="flex items-center gap-1 px-3 py-1 bg-primary-600 hover:bg-primary-500
|
||||
text-white text-sm rounded-lg transition-colors disabled:opacity-50"
|
||||
>
|
||||
<Play size={14} />
|
||||
{isValidating ? 'Validating...' : 'Validate'}
|
||||
</button>
|
||||
</div>
|
||||
<textarea
|
||||
value={source}
|
||||
onChange={(e) => {
|
||||
setSource(e.target.value);
|
||||
setValidation(null);
|
||||
}}
|
||||
className={`${inputClass} h-[400px] font-mono text-sm resize-none`}
|
||||
spellCheck={false}
|
||||
/>
|
||||
<p className="text-xs text-dark-500">
|
||||
Available modules: numpy, scipy, pyNastran, math, statistics
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Footer */}
|
||||
<div className="flex items-center justify-end gap-3 px-6 py-4 border-t border-dark-700">
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="px-4 py-2 text-dark-300 hover:text-white hover:bg-dark-700 rounded-lg transition-colors"
|
||||
>
|
||||
Cancel
|
||||
</button>
|
||||
<button
|
||||
onClick={handleSave}
|
||||
className="px-4 py-2 bg-primary-600 hover:bg-primary-500 text-white rounded-lg transition-colors"
|
||||
>
|
||||
Save Extractor
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
572
atomizer-dashboard/frontend/src/types/atomizer-spec.ts
Normal file
572
atomizer-dashboard/frontend/src/types/atomizer-spec.ts
Normal file
@@ -0,0 +1,572 @@
|
||||
/**
|
||||
* AtomizerSpec v2.0 TypeScript Types
|
||||
*
|
||||
* These types match the JSON Schema at optimization_engine/schemas/atomizer_spec_v2.json
|
||||
* This is the single source of truth for optimization configuration.
|
||||
*/
|
||||
|
||||
// ============================================================================
// Position Types
// ============================================================================

/** 2-D coordinate of a node on the canvas. */
export interface CanvasPosition {
  /** Horizontal position in canvas units. */
  x: number;
  /** Vertical position in canvas units. */
  y: number;
}

// ============================================================================
// Meta Types
// ============================================================================

/** Which actor created the spec. */
export type SpecCreatedBy = 'canvas' | 'claude' | 'api' | 'migration' | 'manual';

/** Metadata describing a spec: versioning, authorship, and study identity. */
export interface SpecMeta {
  /** Schema version (e.g., "2.0") */
  version: string;
  /** When the spec was created (ISO 8601) */
  created?: string;
  /** When the spec was last modified (ISO 8601) */
  modified?: string;
  /** Who/what created the spec */
  created_by?: SpecCreatedBy;
  /** Who/what last modified the spec */
  modified_by?: string;
  /** Unique study identifier (snake_case) */
  study_name: string;
  /** Human-readable description */
  description?: string;
  /** Tags for categorization */
  tags?: string[];
  /** Real-world engineering context */
  engineering_context?: string;
}
|
||||
|
||||
// ============================================================================
// Model Types
// ============================================================================

/** NX CAD part file reference. */
export interface NxPartConfig {
  /** Path to .prt file */
  path?: string;
  /** File hash for change detection */
  hash?: string;
  /** Idealized part filename (_i.prt) */
  idealized_part?: string;
}

/** Finite-element mesh file reference and basic mesh stats. */
export interface FemConfig {
  /** Path to .fem file */
  path?: string;
  /** Number of elements */
  element_count?: number;
  /** Number of nodes */
  node_count?: number;
}

/** Supported FEA solvers. */
export type SolverType = 'nastran' | 'NX_Nastran' | 'abaqus';
/** Analysis type of a subcase. */
export type SubcaseType = 'static' | 'modal' | 'thermal' | 'buckling';

/** A single solver subcase defined in the .sim file. */
export interface Subcase {
  /** Subcase ID as referenced by the solver. */
  id: number;
  /** Optional human-readable name. */
  name?: string;
  /** Analysis type of this subcase. */
  type?: SubcaseType;
}

/** Simulation file configuration. */
export interface SimConfig {
  /** Path to .sim file */
  path: string;
  /** Solver type */
  solver: SolverType;
  /** Solution type (e.g., SOL101) */
  solution_type?: string;
  /** Defined subcases */
  subcases?: Subcase[];
}

/** NX application settings used when driving simulations. */
export interface NxSettings {
  /** Path to the NX installation directory. */
  nx_install_path?: string;
  /** Timeout for a single simulation run, in seconds. */
  simulation_timeout_s?: number;
  /** Whether to launch NX automatically when needed. */
  auto_start_nx?: boolean;
}

/** All model files and solver settings for a study. */
export interface ModelConfig {
  /** NX CAD part (optional). */
  nx_part?: NxPartConfig;
  /** FE mesh file (optional). */
  fem?: FemConfig;
  /** Simulation file (required). */
  sim: SimConfig;
  /** NX application settings. */
  nx_settings?: NxSettings;
}
|
||||
|
||||
// ============================================================================
// Design Variable Types
// ============================================================================

/** Kind of design variable. */
export type DesignVariableType = 'continuous' | 'integer' | 'categorical';

/** Value bounds for a design variable. */
export interface DesignVariableBounds {
  /** Lower bound. */
  min: number;
  /** Upper bound. */
  max: number;
}

/** A design variable: an NX expression the optimizer is allowed to vary. */
export interface DesignVariable {
  /** Unique identifier (pattern: dv_XXX) */
  id: string;
  /** Human-readable name */
  name: string;
  /** NX expression name (must match model) */
  expression_name: string;
  /** Variable type */
  type: DesignVariableType;
  /** Value bounds */
  bounds: DesignVariableBounds;
  /** Current/initial value */
  baseline?: number;
  /** Physical units (mm, deg, etc.) */
  units?: string;
  /** Step size for integer/discrete */
  step?: number;
  /** Whether to include in optimization */
  enabled?: boolean;
  /** Description */
  description?: string;
  /** Canvas position */
  canvas_position?: CanvasPosition;
}
|
||||
|
||||
// ============================================================================
// Extractor Types
// ============================================================================

/** Built-in extractor kinds plus 'custom_function' for user-supplied Python. */
export type ExtractorType =
  | 'displacement'
  | 'frequency'
  | 'stress'
  | 'mass'
  | 'mass_expression'
  | 'zernike_opd'
  | 'zernike_csv'
  | 'temperature'
  | 'custom_function';

/** Type-specific extractor settings; which fields apply depends on the extractor type. */
export interface ExtractorConfig {
  /** Inner radius for Zernike (mm) */
  inner_radius_mm?: number;
  /** Outer radius for Zernike (mm) */
  outer_radius_mm?: number;
  /** Number of Zernike modes */
  n_modes?: number;
  /** Low-order modes to filter */
  filter_low_orders?: number;
  /** Displacement unit */
  displacement_unit?: string;
  /** Reference subcase ID */
  reference_subcase?: number;
  /** NX expression name (for mass_expression) */
  expression_name?: string;
  /** Mode number (for frequency) */
  mode_number?: number;
  /** Element type (for stress) */
  element_type?: string;
  /** Result type */
  result_type?: string;
  /** Metric type */
  metric?: string;
  /** Additional config properties */
  [key: string]: unknown;
}

/** Python function backing a 'custom_function' extractor. */
export interface CustomFunction {
  /** Function name */
  name?: string;
  /** Python module path */
  module?: string;
  /** Function signature */
  signature?: string;
  /** Python source code */
  source_code?: string;
}

/** One named value produced by an extractor. */
export interface ExtractorOutput {
  /** Output name (used by objectives/constraints) */
  name: string;
  /** Specific metric (max, total, rms, etc.) */
  metric?: string;
  /** Subcase ID for this output */
  subcase?: number;
  /** Units */
  units?: string;
}

/** An extractor computes one or more named outputs from FEA results. */
export interface Extractor {
  /** Unique identifier (pattern: ext_XXX) */
  id: string;
  /** Human-readable name */
  name: string;
  /** Extractor type */
  type: ExtractorType;
  /** Whether this is a built-in extractor */
  builtin?: boolean;
  /** Type-specific configuration */
  config?: ExtractorConfig;
  /** Custom function definition (for custom_function type) */
  function?: CustomFunction;
  /** Output values this extractor produces */
  outputs: ExtractorOutput[];
  /** Canvas position */
  canvas_position?: CanvasPosition;
}
|
||||
|
||||
// ============================================================================
// Objective Types
// ============================================================================

/** Whether an objective should be minimized or maximized. */
export type OptimizationDirection = 'minimize' | 'maximize';

/** Reference to the extractor output that feeds an objective. */
export interface ObjectiveSource {
  /** Reference to extractor */
  extractor_id: string;
  /** Which output from the extractor */
  output_name: string;
}

/** An optimization objective defined over one extractor output. */
export interface Objective {
  /** Unique identifier (pattern: obj_XXX) */
  id: string;
  /** Human-readable name */
  name: string;
  /** Optimization direction */
  direction: OptimizationDirection;
  /** Weight for weighted sum (multi-objective) */
  weight?: number;
  /** Where the value comes from */
  source: ObjectiveSource;
  /** Target value (for goal programming) */
  target?: number;
  /** Units */
  units?: string;
  /** Description */
  description?: string;
  /** Canvas position */
  canvas_position?: CanvasPosition;
}

// ============================================================================
// Constraint Types
// ============================================================================

/** Constraint enforcement: 'hard' or 'soft' (soft uses PenaltyConfig). */
export type ConstraintType = 'hard' | 'soft';
/** Comparison applied between the source value and the threshold. */
export type ConstraintOperator = '<=' | '>=' | '<' | '>' | '==';
/** Shape of the penalty applied on soft-constraint violation. */
export type PenaltyMethod = 'linear' | 'quadratic' | 'exponential';

/** Reference to the extractor output a constraint evaluates. */
export interface ConstraintSource {
  /** Reference to extractor. */
  extractor_id: string;
  /** Which output from the extractor. */
  output_name: string;
}

/** Penalty settings for soft-constraint violations. */
export interface PenaltyConfig {
  /** Penalty method */
  method?: PenaltyMethod;
  /** Penalty weight */
  weight?: number;
  /** Soft margin before penalty kicks in */
  margin?: number;
}

/** A hard or soft constraint on an extractor output. */
export interface Constraint {
  /** Unique identifier (pattern: con_XXX) */
  id: string;
  /** Human-readable name */
  name: string;
  /** Constraint type */
  type: ConstraintType;
  /** Comparison operator */
  operator: ConstraintOperator;
  /** Constraint threshold value */
  threshold: number;
  /** Where the value comes from */
  source: ConstraintSource;
  /** Penalty method configuration */
  penalty_config?: PenaltyConfig;
  /** Description */
  description?: string;
  /** Canvas position */
  canvas_position?: CanvasPosition;
}
|
||||
|
||||
// ============================================================================
// Optimization Types
// ============================================================================

/** Supported optimization algorithm families. */
export type AlgorithmType = 'TPE' | 'CMA-ES' | 'NSGA-II' | 'RandomSearch' | 'SAT_v3' | 'GP-BO';
/** Surrogate model architectures. */
export type SurrogateType = 'MLP' | 'GNN' | 'ensemble';

/** Algorithm hyperparameters; which fields apply depends on the algorithm. */
export interface AlgorithmConfig {
  /** Population size (evolutionary algorithms) */
  population_size?: number;
  /** Number of generations */
  n_generations?: number;
  /** Mutation probability */
  mutation_prob?: number | null;
  /** Crossover probability */
  crossover_prob?: number;
  /** Random seed */
  seed?: number;
  /** Number of startup trials (TPE) */
  n_startup_trials?: number;
  /** Initial sigma (CMA-ES) */
  sigma0?: number;
  /** Additional config properties */
  [key: string]: unknown;
}

/** Selected algorithm plus its hyperparameters. */
export interface Algorithm {
  /** Algorithm family to run. */
  type: AlgorithmType;
  /** Algorithm-specific hyperparameters. */
  config?: AlgorithmConfig;
}

/** Stopping criteria for the optimization run. */
export interface OptimizationBudget {
  /** Maximum number of trials */
  max_trials?: number;
  /** Maximum time in hours */
  max_time_hours?: number;
  /** Stop if no improvement for N trials */
  convergence_patience?: number;
}

/** Surrogate-model training and acquisition settings. */
export interface SurrogateConfig {
  /** Number of models in ensemble */
  n_models?: number;
  /** Network architecture layers */
  architecture?: number[];
  /** Retrain every N trials */
  train_every_n_trials?: number;
  /** Minimum training samples */
  min_training_samples?: number;
  /** Acquisition function candidates */
  acquisition_candidates?: number;
  /** FEA validations per round */
  fea_validations_per_round?: number;
}

/** Optional surrogate-assisted optimization settings. */
export interface Surrogate {
  /** Whether surrogate assistance is enabled. */
  enabled?: boolean;
  /** Surrogate model type. */
  type?: SurrogateType;
  /** Training and acquisition settings. */
  config?: SurrogateConfig;
}

/** Top-level optimization configuration. */
export interface OptimizationConfig {
  /** Algorithm selection and hyperparameters. */
  algorithm: Algorithm;
  /** Stopping criteria. */
  budget: OptimizationBudget;
  /** Optional surrogate settings. */
  surrogate?: Surrogate;
  /** Canvas position of the optimization node. */
  canvas_position?: CanvasPosition;
}
|
||||
|
||||
// ============================================================================
// Workflow Types
// ============================================================================

/** One stage of a multi-stage optimization workflow. */
export interface WorkflowStage {
  /** Unique stage identifier. */
  id: string;
  /** Human-readable stage name. */
  name: string;
  /** Algorithm used in this stage. */
  algorithm?: string;
  /** Trial budget for this stage. */
  trials?: number;
  /** What this stage is meant to achieve. */
  purpose?: string;
}

/** Transition between two workflow stages. */
export interface WorkflowTransition {
  /** Source stage id. */
  from: string;
  /** Target stage id. */
  to: string;
  /** Trigger condition — expression format not specified here; TODO confirm. */
  condition?: string;
}

/** Multi-stage optimization workflow definition. */
export interface Workflow {
  /** Stages in the workflow. */
  stages?: WorkflowStage[];
  /** Transitions between stages. */
  transitions?: WorkflowTransition[];
}

// ============================================================================
// Reporting Types
// ============================================================================

/** Settings for a single insight artifact. */
export interface InsightConfig {
  /** Include an HTML rendering. */
  include_html?: boolean;
  /** Show Pareto-front evolution. */
  show_pareto_evolution?: boolean;
  /** Additional insight-specific settings. */
  [key: string]: unknown;
}

/** One insight to generate for the study. */
export interface Insight {
  /** Insight kind identifier. */
  type?: string;
  /** Trial selector — presumably a range/filter string; TODO confirm. */
  for_trials?: string;
  /** Insight settings. */
  config?: InsightConfig;
}

/** Report generation configuration. */
export interface ReportingConfig {
  /** Generate reports automatically. */
  auto_report?: boolean;
  /** Events that trigger report generation. */
  report_triggers?: string[];
  /** Insights to include. */
  insights?: Insight[];
}

// ============================================================================
// Canvas Types
// ============================================================================

/** Saved canvas viewport (pan and zoom). */
export interface CanvasViewport {
  /** Viewport pan offset, x. */
  x: number;
  /** Viewport pan offset, y. */
  y: number;
  /** Zoom factor. */
  zoom: number;
}

/** A persisted edge between two canvas nodes. */
export interface CanvasEdge {
  /** Source node id. */
  source: string;
  /** Target node id. */
  target: string;
  /** Source handle id, if the node has multiple handles. */
  sourceHandle?: string;
  /** Target handle id, if the node has multiple handles. */
  targetHandle?: string;
}

/** A named grouping of canvas nodes. */
export interface CanvasGroup {
  /** Group identifier. */
  id: string;
  /** Group display name. */
  name: string;
  /** Ids of the nodes in this group. */
  node_ids: string[];
}

/** Persisted canvas UI state. */
export interface CanvasConfig {
  /** Version of the layout format. */
  layout_version?: string;
  /** Saved pan/zoom. */
  viewport?: CanvasViewport;
  /** Saved edges. */
  edges?: CanvasEdge[];
  /** Saved node groups. */
  groups?: CanvasGroup[];
}
|
||||
|
||||
// ============================================================================
// Main AtomizerSpec Type
// ============================================================================

/**
 * AtomizerSpec v2.0 - The unified configuration schema for Atomizer optimization studies.
 *
 * This is the single source of truth used by:
 * - Canvas UI (rendering and editing)
 * - Backend API (validation and storage)
 * - Claude Assistant (reading and modifying)
 * - Optimization Engine (execution)
 *
 * Matches the JSON Schema at optimization_engine/schemas/atomizer_spec_v2.json.
 */
export interface AtomizerSpec {
  /** Metadata about the spec */
  meta: SpecMeta;
  /** NX model files and configuration */
  model: ModelConfig;
  /** Design variables (NX expressions) to optimize */
  design_variables: DesignVariable[];
  /** Physics extractors that compute outputs from FEA results */
  extractors: Extractor[];
  /** Optimization objectives (minimize/maximize) */
  objectives: Objective[];
  /** Hard and soft constraints */
  constraints?: Constraint[];
  /** Optimization algorithm configuration */
  optimization: OptimizationConfig;
  /** Multi-stage optimization workflow */
  workflow?: Workflow;
  /** Reporting configuration */
  reporting?: ReportingConfig;
  /** Canvas UI state (persisted for reconstruction) */
  canvas?: CanvasConfig;
}
|
||||
|
||||
// ============================================================================
|
||||
// Utility Types for API Responses
|
||||
// ============================================================================
|
||||
|
||||
/** A single validation failure found while checking a spec. */
export interface SpecValidationError {
  /** Failure category: JSON-schema shape, semantic rule, or dangling reference */
  type: 'schema' | 'semantic' | 'reference';
  /** Path into the spec where the error occurred (e.g. ['objectives', '0', 'source']) */
  path: string[];
  /** Human-readable description of the problem */
  message: string;
}

/** A non-fatal issue found while validating a spec. */
export interface SpecValidationWarning {
  /** Free-form warning category (not constrained like error types) */
  type: string;
  /** Path into the spec where the warning applies */
  path: string[];
  /** Human-readable description of the concern */
  message: string;
}

/** Full result of validating a spec, including entity counts for display. */
export interface SpecValidationReport {
  /** True when there are no errors (warnings alone do not invalidate) */
  valid: boolean;
  errors: SpecValidationError[];
  warnings: SpecValidationWarning[];
  /** Counts of each entity kind in the validated spec */
  summary: {
    design_variables: number;
    extractors: number;
    objectives: number;
    constraints: number;
    custom_functions: number;
  };
}
|
||||
|
||||
/** A single mutation to apply to a spec (JSON-patch-like operation). */
export interface SpecModification {
  /** 'set' replaces a value, 'add' appends/inserts, 'remove' deletes */
  operation: 'set' | 'add' | 'remove';
  /** Dotted/slash path to the target location within the spec */
  path: string;
  /** New value; omitted for 'remove' operations */
  value?: unknown;
}

/** Server response after a spec update has been applied. */
export interface SpecUpdateResult {
  success: boolean;
  /** Content hash of the spec after the update (for optimistic concurrency) */
  hash: string;
  /** ISO timestamp of the modification */
  modified: string;
  /** Identity of the actor that made the change (user, assistant, migration, ...) */
  modified_by: string;
}

/** Request body for patching a single value in a spec. */
export interface SpecPatchRequest {
  /** Path to the value being patched */
  path: string;
  /** Replacement value */
  value: unknown;
  /** Optional attribution for the change */
  modified_by?: string;
}
|
||||
|
||||
// ============================================================================
|
||||
// Node Types for Canvas
|
||||
// ============================================================================
|
||||
|
||||
/** Kinds of nodes that can appear on the spec canvas. */
export type SpecNodeType =
  | 'designVar'
  | 'extractor'
  | 'objective'
  | 'constraint'
  | 'model'
  | 'solver'
  | 'algorithm';

/** Minimal shape shared by all canvas nodes (React Flow compatible). */
export interface SpecNodeBase {
  /** Stable node id referenced by canvas edges */
  id: string;
  type: SpecNodeType;
  /** Position on the canvas */
  position: CanvasPosition;
  /** Node-type-specific payload rendered by the node component */
  data: Record<string, unknown>;
}
|
||||
|
||||
// ============================================================================
|
||||
// WebSocket Types
|
||||
// ============================================================================
|
||||
|
||||
/** Message kinds pushed from the server over the spec-sync WebSocket. */
export type SpecSyncMessageType =
  | 'spec_updated'
  | 'validation_error'
  | 'node_added'
  | 'node_removed'
  | 'connection_ack';

/** A server-to-client sync message; optional fields vary by message type. */
export interface SpecSyncMessage {
  type: SpecSyncMessageType;
  /** ISO timestamp when the event was emitted */
  timestamp: string;
  /** Spec content hash after the change (present on update messages) */
  hash?: string;
  /** Actor that caused the change */
  modified_by?: string;
  /** Per-path before/after values for 'spec_updated' messages */
  changes?: Array<{
    path: string;
    old: unknown;
    new: unknown;
  }>;
  /** Error description for 'validation_error' messages */
  error?: string;
}

/** A client-to-server message for subscribing to and editing a study's spec. */
export interface SpecClientMessage {
  type: 'subscribe' | 'patch_node' | 'add_node' | 'remove_node' | 'update_position';
  /** Study whose spec this message targets */
  study_id: string;
  /** Target node for node-scoped operations */
  node_id?: string;
  /** Payload for patch/add operations */
  data?: Record<string, unknown>;
  /** New position for 'update_position' messages */
  position?: CanvasPosition;
}
|
||||
@@ -22,6 +22,7 @@ import { analysisTools } from "./tools/analysis.js";
|
||||
import { reportingTools } from "./tools/reporting.js";
|
||||
import { physicsTools } from "./tools/physics.js";
|
||||
import { canvasTools } from "./tools/canvas.js";
|
||||
import { specTools } from "./tools/spec.js";
|
||||
import { adminTools } from "./tools/admin.js";
|
||||
import { ATOMIZER_MODE } from "./utils/paths.js";
|
||||
|
||||
@@ -52,6 +53,7 @@ const userTools: AtomizerTool[] = [
|
||||
...reportingTools,
|
||||
...physicsTools,
|
||||
...canvasTools,
|
||||
...specTools,
|
||||
];
|
||||
|
||||
const powerTools: AtomizerTool[] = [
|
||||
|
||||
1175
mcp-server/atomizer-tools/src/tools/spec.ts
Normal file
1175
mcp-server/atomizer-tools/src/tools/spec.ts
Normal file
File diff suppressed because it is too large
Load Diff
@@ -9,6 +9,7 @@ Modules:
|
||||
- builder: OptimizationConfigBuilder for creating configs
|
||||
- setup_wizard: Interactive configuration setup
|
||||
- capability_matcher: Match capabilities to requirements
|
||||
- spec_models: AtomizerSpec v2.0 Pydantic models (unified configuration)
|
||||
"""
|
||||
|
||||
# Lazy imports to avoid circular dependencies
|
||||
@@ -31,6 +32,27 @@ def __getattr__(name):
|
||||
elif name == 'TemplateLoader':
|
||||
from .template_loader import TemplateLoader
|
||||
return TemplateLoader
|
||||
elif name == 'AtomizerSpec':
|
||||
from .spec_models import AtomizerSpec
|
||||
return AtomizerSpec
|
||||
elif name == 'SpecValidator':
|
||||
from .spec_validator import SpecValidator
|
||||
return SpecValidator
|
||||
elif name == 'SpecValidationError':
|
||||
from .spec_validator import SpecValidationError
|
||||
return SpecValidationError
|
||||
elif name == 'validate_spec':
|
||||
from .spec_validator import validate_spec
|
||||
return validate_spec
|
||||
elif name == 'SpecMigrator':
|
||||
from .migrator import SpecMigrator
|
||||
return SpecMigrator
|
||||
elif name == 'migrate_config':
|
||||
from .migrator import migrate_config
|
||||
return migrate_config
|
||||
elif name == 'migrate_config_file':
|
||||
from .migrator import migrate_config_file
|
||||
return migrate_config_file
|
||||
raise AttributeError(f"module 'optimization_engine.config' has no attribute '{name}'")
|
||||
|
||||
__all__ = [
|
||||
@@ -40,4 +62,11 @@ __all__ = [
|
||||
'SetupWizard',
|
||||
'CapabilityMatcher',
|
||||
'TemplateLoader',
|
||||
'AtomizerSpec',
|
||||
'SpecValidator',
|
||||
'SpecValidationError',
|
||||
'validate_spec',
|
||||
'SpecMigrator',
|
||||
'migrate_config',
|
||||
'migrate_config_file',
|
||||
]
|
||||
|
||||
844
optimization_engine/config/migrator.py
Normal file
844
optimization_engine/config/migrator.py
Normal file
@@ -0,0 +1,844 @@
|
||||
"""
|
||||
AtomizerSpec v2.0 Migrator
|
||||
|
||||
Converts legacy optimization_config.json files to AtomizerSpec v2.0 format.
|
||||
|
||||
Supports migration from:
|
||||
- Mirror/Zernike configs (extraction_method, zernike_settings)
|
||||
- Structural/Bracket configs (optimization_settings, simulation_settings)
|
||||
- Canvas Intent format (simplified canvas output)
|
||||
|
||||
Migration Rules:
|
||||
- bounds: [min, max] -> bounds: {min, max}
|
||||
- parameter -> expression_name
|
||||
- goal/type: "minimize"/"maximize" -> direction: "minimize"/"maximize"
|
||||
- Infers extractors from objectives and extraction settings
|
||||
- Generates canvas edges automatically
|
||||
"""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
||||
import json
|
||||
import re
|
||||
|
||||
|
||||
class MigrationError(Exception):
    """Raised when a legacy config cannot be migrated to AtomizerSpec v2.0
    (e.g. the source config file does not exist)."""
    pass
||||
|
||||
|
||||
class SpecMigrator:
    """
    Migrate old optimization_config.json to AtomizerSpec v2.0.

    Handles multiple legacy formats (mirror/Zernike, structural, canvas
    intent, generic) and infers missing information such as extractor
    types, canvas node positions, and canvas edges.
    """

    # Regex patterns (matched against the lowercased objective name) used to
    # infer which extractor type produces a given objective's value.
    EXTRACTOR_INFERENCE = {
        # Zernike patterns
        r"wfe|zernike|opd": "zernike_opd",
        r"mfg|manufacturing": "zernike_opd",
        r"rms": "zernike_opd",
        # Structural patterns
        r"displacement|deflection|deform": "displacement",
        r"stress|von.?mises": "stress",
        r"frequency|modal|eigen": "frequency",
        r"mass|weight": "mass",
        r"stiffness": "displacement",  # Stiffness computed from displacement
        r"temperature|thermal": "temperature",
    }

    def __init__(self, study_path: Optional[Path] = None):
        """
        Initialize migrator.

        Args:
            study_path: Path to study directory (for inferring sim/fem paths)
        """
        self.study_path = Path(study_path) if study_path else None
        # Monotonic counters used to mint unique ids (ext_001, obj_001, ...).
        self._extractor_counter = 0
        self._objective_counter = 0
        self._constraint_counter = 0
        self._dv_counter = 0

    def migrate(
        self,
        old_config: Dict[str, Any],
        study_name: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Convert old config to AtomizerSpec v2.0.

        Args:
            old_config: Legacy config dict
            study_name: Override study name (defaults to config value)

        Returns:
            AtomizerSpec v2.0 dict
        """
        # Reset counters so repeated migrate() calls produce fresh ids.
        self._extractor_counter = 0
        self._objective_counter = 0
        self._constraint_counter = 0
        self._dv_counter = 0

        # Detect config type (mirror / structural / canvas_intent / generic).
        config_type = self._detect_config_type(old_config)

        # Build the spec skeleton; sections are filled in below.
        spec = {
            "meta": self._migrate_meta(old_config, study_name),
            "model": self._migrate_model(old_config, config_type),
            "design_variables": self._migrate_design_variables(old_config),
            "extractors": [],
            "objectives": [],
            "constraints": [],
            "optimization": self._migrate_optimization(old_config, config_type),
            "canvas": {"edges": [], "layout_version": "2.0"}
        }

        # Migrate extractors and objectives together (they're linked).
        extractors, objectives = self._migrate_extractors_and_objectives(old_config, config_type)
        spec["extractors"] = extractors
        spec["objectives"] = objectives

        # Migrate constraints (may append extractors, e.g. a mass extractor).
        spec["constraints"] = self._migrate_constraints(old_config, spec["extractors"])

        # Generate canvas edges from the now-complete node set.
        spec["canvas"]["edges"] = self._generate_edges(spec)

        # Add workflow if SAT/turbo settings present.
        if self._has_sat_settings(old_config):
            spec["workflow"] = self._migrate_workflow(old_config)

        return spec

    def migrate_file(
        self,
        config_path: Union[str, Path],
        output_path: Optional[Union[str, Path]] = None
    ) -> Dict[str, Any]:
        """
        Migrate a config file and optionally save the result.

        Args:
            config_path: Path to old config file
            output_path: Path to save new spec (optional)

        Returns:
            AtomizerSpec v2.0 dict

        Raises:
            MigrationError: If the config file does not exist.
        """
        config_path = Path(config_path)

        if not config_path.exists():
            raise MigrationError(f"Config file not found: {config_path}")

        with open(config_path, 'r', encoding='utf-8') as f:
            old_config = json.load(f)

        # Infer study path from config location when not supplied.
        if self.study_path is None:
            # Config is typically in study_dir/1_setup/ or study_dir/
            if config_path.parent.name == "1_setup":
                self.study_path = config_path.parent.parent
            else:
                self.study_path = config_path.parent

        spec = self.migrate(old_config)

        if output_path:
            output_path = Path(output_path)
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(spec, f, indent=2, ensure_ascii=False)

        return spec

    # =========================================================================
    # Detection
    # =========================================================================

    def _detect_config_type(self, config: Dict) -> str:
        """Detect the legacy config format from its distinguishing keys."""
        if "extraction_method" in config or "zernike_settings" in config:
            return "mirror"
        elif "simulation_settings" in config or "extraction_settings" in config:
            return "structural"
        elif "optimization_settings" in config:
            return "structural"
        elif "extractors" in config:
            # Already partially in new format (canvas intent)
            return "canvas_intent"
        else:
            # Generic/minimal format
            return "generic"

    def _has_sat_settings(self, config: Dict) -> bool:
        """Check if config has SAT/turbo settings."""
        return (
            "sat_settings" in config or
            config.get("optimization", {}).get("algorithm") in ["SAT_v3", "SAT", "turbo"]
        )

    # =========================================================================
    # Meta Migration
    # =========================================================================

    def _migrate_meta(self, config: Dict, study_name: Optional[str]) -> Dict:
        """Migrate metadata section (name is normalized to snake_case)."""
        now = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')

        name = study_name or config.get("study_name", "migrated_study")
        # Ensure snake_case: lowercase, non-alphanumerics collapsed to '_'.
        name = re.sub(r'[^a-z0-9_]', '_', name.lower())
        name = re.sub(r'_+', '_', name).strip('_')

        meta = {
            "version": "2.0",
            "created": now,
            "modified": now,
            "created_by": "migration",
            "modified_by": "migration",
            "study_name": name,
            "description": config.get("description", ""),
            "tags": []
        }

        # Extract tags from various legacy sources.
        if "study_tag" in config:
            meta["tags"].append(config["study_tag"])

        if "business_context" in config:
            meta["engineering_context"] = config["business_context"].get("purpose", "")

        # Infer tags from config type.
        if "zernike_settings" in config:
            meta["tags"].extend(["mirror", "zernike"])
        if "extraction_method" in config:
            if config["extraction_method"].get("type") == "zernike_opd":
                meta["tags"].append("opd")

        return meta

    # =========================================================================
    # Model Migration
    # =========================================================================

    def _migrate_model(self, config: Dict, config_type: str) -> Dict:
        """Migrate model section (sim/fem/prt paths and solver settings)."""
        model = {
            "sim": {
                "path": "",
                "solver": "nastran"
            }
        }

        # Extract from nx_settings (mirror format).
        if "nx_settings" in config:
            nx = config["nx_settings"]
            model["sim"]["path"] = nx.get("sim_file", "")
            if "nx_install_path" in nx:
                model["nx_settings"] = {
                    "nx_install_path": nx["nx_install_path"],
                    "simulation_timeout_s": nx.get("simulation_timeout_s", 600)
                }

        # Extract from simulation_settings (structural format).
        elif "simulation_settings" in config:
            sim = config["simulation_settings"]
            model["sim"]["path"] = sim.get("sim_file", "")
            solver = sim.get("solver", "nastran").lower()
            # Normalize solver name - valid values: nastran, NX_Nastran, abaqus.
            solver_map = {"nx": "nastran", "nx_nastran": "NX_Nastran", "nxnastran": "NX_Nastran"}
            if solver in solver_map:
                model["sim"]["solver"] = solver_map[solver]
            elif solver in ("nastran", "abaqus"):
                model["sim"]["solver"] = solver
            else:
                # Unknown solver -> safe default.
                model["sim"]["solver"] = "nastran"
            if sim.get("solution_type"):
                model["sim"]["solution_type"] = sim["solution_type"]

            if sim.get("model_file"):
                model["nx_part"] = {"path": sim["model_file"]}
            if sim.get("fem_file"):
                model["fem"] = {"path": sim["fem_file"]}

        # Try to infer the .sim file from the study directory layout.
        if self.study_path and not model["sim"]["path"]:
            setup_dir = self.study_path / "1_setup" / "model"
            if setup_dir.exists():
                for f in setup_dir.glob("*.sim"):
                    model["sim"]["path"] = str(f.relative_to(self.study_path))
                    break

        return model

    # =========================================================================
    # Design Variables Migration
    # =========================================================================

    def _migrate_design_variables(self, config: Dict) -> List[Dict]:
        """Migrate design variables, normalizing bounds to {min, max} dicts."""
        dvs = []

        for dv in config.get("design_variables", []):
            self._dv_counter += 1

            # Handle different bound formats: [min, max] list or {min, max} dict.
            if "bounds" in dv:
                if isinstance(dv["bounds"], list):
                    bounds = {"min": dv["bounds"][0], "max": dv["bounds"][1]}
                else:
                    bounds = dv["bounds"]
            else:
                bounds = {"min": dv.get("min", 0), "max": dv.get("max", 1)}

            # Ensure min < max (fix degenerate cases by expanding around value).
            if bounds["min"] >= bounds["max"]:
                val = bounds["min"]
                if val == 0:
                    bounds = {"min": -0.001, "max": 0.001}
                else:
                    bounds = {"min": val * 0.99, "max": val * 1.01}

            # Determine type; anything unrecognized falls back to continuous.
            dv_type = dv.get("type", "continuous")
            if dv_type not in ["continuous", "integer", "categorical"]:
                dv_type = "continuous"

            new_dv = {
                "id": f"dv_{self._dv_counter:03d}",
                "name": dv.get("name", f"param_{self._dv_counter}"),
                # Legacy configs used 'parameter' for the NX expression name.
                "expression_name": dv.get("expression_name", dv.get("parameter", dv.get("name", ""))),
                "type": dv_type,
                "bounds": bounds,
                "baseline": dv.get("baseline", dv.get("initial")),
                "units": dv.get("units", dv.get("unit", "")),
                "enabled": dv.get("enabled", True),
                "description": dv.get("description", dv.get("notes", "")),
                # Stack DV nodes vertically in the leftmost canvas column.
                "canvas_position": {"x": 50, "y": 100 + (self._dv_counter - 1) * 80}
            }

            dvs.append(new_dv)

        return dvs

    # =========================================================================
    # Extractors and Objectives Migration
    # =========================================================================

    def _migrate_extractors_and_objectives(
        self,
        config: Dict,
        config_type: str
    ) -> Tuple[List[Dict], List[Dict]]:
        """
        Migrate extractors and objectives together.

        Returns tuple of (extractors, objectives).
        """
        extractors = []
        objectives = []

        # Handle mirror/zernike configs.
        if config_type == "mirror" and "zernike_settings" in config:
            extractor = self._create_zernike_extractor(config)
            extractors.append(extractor)

            # All objectives feed from the single Zernike extractor.
            for obj in config.get("objectives", []):
                self._objective_counter += 1
                objectives.append(self._create_objective(obj, extractor["id"]))

        # Handle structural configs.
        elif config_type == "structural":
            # Create extractors based on extraction_settings when present.
            if "extraction_settings" in config:
                extractor = self._create_structural_extractor(config)
                extractors.append(extractor)
                ext_id = extractor["id"]
            else:
                # Infer extractors lazily from objective names.
                ext_id = None

            for obj in config.get("objectives", []):
                self._objective_counter += 1

                # Infer extractor if not yet created.
                if ext_id is None:
                    inferred_type = self._infer_extractor_type(obj.get("name", ""))
                    ext_id = self._get_or_create_extractor(extractors, inferred_type, obj.get("name", ""))

                objectives.append(self._create_objective(obj, ext_id))

        # Handle canvas intent or generic.
        else:
            # Pass through existing extractors if present, assigning ids.
            for ext in config.get("extractors", []):
                self._extractor_counter += 1
                ext_copy = dict(ext)
                if "id" not in ext_copy:
                    ext_copy["id"] = f"ext_{self._extractor_counter:03d}"
                extractors.append(ext_copy)

            # Create objectives, reusing the first extractor when available.
            for obj in config.get("objectives", []):
                self._objective_counter += 1

                ext_id = None
                if extractors:
                    ext_id = extractors[0]["id"]
                else:
                    inferred_type = self._infer_extractor_type(obj.get("name", ""))
                    ext_id = self._get_or_create_extractor(extractors, inferred_type, obj.get("name", ""))

                objectives.append(self._create_objective(obj, ext_id))

        return extractors, objectives

    def _create_zernike_extractor(self, config: Dict) -> Dict:
        """Create a Zernike OPD extractor from legacy zernike/extraction settings."""
        self._extractor_counter += 1

        zs = config.get("zernike_settings", {})
        em = config.get("extraction_method", {})

        # Collect all output names from objectives.
        outputs = []
        for obj in config.get("objectives", []):
            obj_name = obj.get("name", "")
            outputs.append({
                "name": obj_name,
                "metric": "filtered_rms_nm"
            })

        # Get outer radius with sensible default for telescope mirrors.
        outer_radius = em.get("outer_radius", zs.get("outer_radius"))
        if outer_radius is None:
            # Default to typical M1 mirror outer radius.
            outer_radius = 500.0

        extractor = {
            "id": f"ext_{self._extractor_counter:03d}",
            "name": "Zernike WFE Extractor",
            "type": "zernike_opd",
            "builtin": True,
            "config": {
                "inner_radius_mm": em.get("inner_radius", zs.get("inner_radius", 0)),
                "outer_radius_mm": outer_radius,
                "n_modes": zs.get("n_modes", 40),
                "filter_low_orders": zs.get("filter_low_orders", 4),
                "displacement_unit": zs.get("displacement_unit", "mm"),
                "reference_subcase": int(zs.get("reference_subcase", 1))
            },
            "outputs": outputs,
            "canvas_position": {"x": 740, "y": 100}
        }

        return extractor

    def _create_structural_extractor(self, config: Dict) -> Dict:
        """Create extractor from extraction_settings (structural format)."""
        self._extractor_counter += 1

        es = config.get("extraction_settings", {})

        # Infer type from extractor class name; default to displacement.
        extractor_class = es.get("extractor_class", "")
        if "stiffness" in extractor_class.lower():
            ext_type = "displacement"
        elif "stress" in extractor_class.lower():
            ext_type = "stress"
        elif "frequency" in extractor_class.lower():
            ext_type = "frequency"
        else:
            ext_type = "displacement"

        # Create outputs from objectives.
        outputs = []
        for obj in config.get("objectives", []):
            outputs.append({
                "name": obj.get("name", "output"),
                "metric": es.get("displacement_aggregation", "max")
            })

        extractor = {
            "id": f"ext_{self._extractor_counter:03d}",
            "name": f"{extractor_class or 'Results'} Extractor",
            "type": ext_type,
            "builtin": True,
            "config": {
                "result_type": es.get("displacement_component", "z"),
                "metric": es.get("displacement_aggregation", "max")
            },
            "outputs": outputs,
            "canvas_position": {"x": 740, "y": 100}
        }

        return extractor

    def _infer_extractor_type(self, objective_name: str) -> str:
        """Infer extractor type from an objective name via EXTRACTOR_INFERENCE."""
        name_lower = objective_name.lower()

        for pattern, ext_type in self.EXTRACTOR_INFERENCE.items():
            if re.search(pattern, name_lower):
                return ext_type

        return "displacement"  # Default

    def _get_or_create_extractor(
        self,
        extractors: List[Dict],
        ext_type: str,
        output_name: str
    ) -> str:
        """Get existing extractor of the given type or create a new one.

        When an extractor of ext_type already exists, the output name is
        appended to it (if missing) and its id is returned.
        """
        # Look for an existing extractor of this type.
        for ext in extractors:
            if ext.get("type") == ext_type:
                # Add output if not present.
                output_names = {o["name"] for o in ext.get("outputs", [])}
                if output_name not in output_names:
                    ext["outputs"].append({"name": output_name, "metric": "total"})
                return ext["id"]

        # Create new extractor with a counter-minted unique id.
        self._extractor_counter += 1
        ext_id = f"ext_{self._extractor_counter:03d}"

        extractor = {
            "id": ext_id,
            "name": f"{ext_type.title()} Extractor",
            "type": ext_type,
            "builtin": True,
            "outputs": [{"name": output_name, "metric": "total"}],
            "canvas_position": {"x": 740, "y": 100 + (len(extractors)) * 150}
        }

        extractors.append(extractor)
        return ext_id

    def _create_objective(self, obj: Dict, extractor_id: str) -> Dict:
        """Create an objective from the legacy format, linked to an extractor.

        Note: callers increment self._objective_counter before calling.
        """
        # Normalize direction; legacy configs used 'type' or 'goal'.
        direction = obj.get("direction", obj.get("type", obj.get("goal", "minimize")))
        if direction not in ["minimize", "maximize"]:
            # str() guards against non-string legacy values (e.g. None).
            direction = "minimize" if "min" in str(direction).lower() else "maximize"

        obj_name = obj.get("name", f"objective_{self._objective_counter}")

        return {
            "id": f"obj_{self._objective_counter:03d}",
            "name": obj.get("description", obj_name),
            "direction": direction,
            "weight": obj.get("weight", 1.0),
            "source": {
                "extractor_id": extractor_id,
                "output_name": obj_name
            },
            "target": obj.get("target"),
            "units": obj.get("units", ""),
            "canvas_position": {"x": 1020, "y": 100 + (self._objective_counter - 1) * 100}
        }

    # =========================================================================
    # Constraints Migration
    # =========================================================================

    def _migrate_constraints(self, config: Dict, extractors: List[Dict]) -> List[Dict]:
        """Migrate constraints, linking each to an extractor output.

        May append a new mass extractor to `extractors` for mass constraints
        with no matching extractor.
        """
        constraints = []

        for con in config.get("constraints", []):
            self._constraint_counter += 1

            # Determine constraint type (hard/soft); legacy operator-style
            # type strings ("less_than", ...) imply hard constraints.
            con_type = con.get("type", "hard")
            if con_type not in ["hard", "soft"]:
                if con_type in ["less_than", "greater_than", "less_equal", "greater_equal"]:
                    con_type = "hard"

            # Determine operator from explicit field or legacy type string.
            operator = con.get("operator", "<=")
            old_type = con.get("type", "")
            if "less" in old_type:
                operator = "<=" if "equal" in old_type else "<"
            elif "greater" in old_type:
                operator = ">=" if "equal" in old_type else ">"

            # Try to parse expression for threshold.
            threshold = con.get("threshold", con.get("value"))
            if threshold is None and "expression" in con:
                # Parse from expression like "mass_kg <= 120.0"
                match = re.search(r'([<>=!]+)\s*([\d.]+)', con["expression"])
                if match:
                    operator = match.group(1)
                    threshold = float(match.group(2))

            # Find or create extractor for constraint.
            con_name = con.get("name", "constraint")
            extractor_id = None
            output_name = con_name

            # Check if name matches an existing output (share extractor).
            for ext in extractors:
                for out in ext.get("outputs", []):
                    if con_name.replace("_max", "").replace("_min", "") in out["name"]:
                        extractor_id = ext["id"]
                        output_name = out["name"]
                        break
                if extractor_id:
                    break

            # If no match, use first extractor or create mass extractor for
            # mass constraints.
            if extractor_id is None:
                if "mass" in con_name.lower():
                    # Check if mass extractor exists.
                    for ext in extractors:
                        if ext.get("type") == "mass":
                            extractor_id = ext["id"]
                            break

                    if extractor_id is None:
                        # Create mass extractor; use the shared counter so the
                        # id cannot collide with previously minted ids.
                        self._extractor_counter += 1
                        ext_id = f"ext_{self._extractor_counter:03d}"
                        extractors.append({
                            "id": ext_id,
                            "name": "Mass Extractor",
                            "type": "mass",
                            "builtin": True,
                            "outputs": [{"name": "mass_kg", "metric": "total"}],
                            "canvas_position": {"x": 740, "y": 100 + len(extractors) * 150}
                        })
                        extractor_id = ext_id
                    output_name = "mass_kg"
                elif extractors:
                    extractor_id = extractors[0]["id"]
                    output_name = extractors[0]["outputs"][0]["name"] if extractors[0].get("outputs") else con_name

            constraint = {
                "id": f"con_{self._constraint_counter:03d}",
                "name": con.get("description", con_name),
                "type": con_type if con_type in ["hard", "soft"] else "hard",
                "operator": operator,
                # Preserve an explicit 0 / 0.0 threshold; only None defaults.
                "threshold": threshold if threshold is not None else 0,
                "source": {
                    "extractor_id": extractor_id or "ext_001",
                    "output_name": output_name
                },
                "penalty_config": {
                    "method": "quadratic",
                    "weight": con.get("penalty_weight", 1000.0)
                },
                "canvas_position": {"x": 1020, "y": 400 + (self._constraint_counter - 1) * 100}
            }

            constraints.append(constraint)

        return constraints

    # =========================================================================
    # Optimization Migration
    # =========================================================================

    def _migrate_optimization(self, config: Dict, config_type: str) -> Dict:
        """Migrate optimization settings, normalizing the algorithm name."""
        # Extract from different legacy locations.
        if "optimization" in config:
            opt = config["optimization"]
        elif "optimization_settings" in config:
            opt = config["optimization_settings"]
        else:
            opt = {}

        # Normalize algorithm name (legacy configs used sampler names too).
        algo = opt.get("algorithm", opt.get("sampler", "TPE"))
        algo_map = {
            "tpe": "TPE",
            "tpesampler": "TPE",
            "cma-es": "CMA-ES",
            "cmaes": "CMA-ES",
            "nsga-ii": "NSGA-II",
            "nsgaii": "NSGA-II",
            "nsga2": "NSGA-II",
            "random": "RandomSearch",
            "randomsampler": "RandomSearch",
            "randomsearch": "RandomSearch",
            "sat": "SAT_v3",
            "sat_v3": "SAT_v3",
            "turbo": "SAT_v3",
            "gp": "GP-BO",
            "gp-bo": "GP-BO",
            "gpbo": "GP-BO",
            "bo": "GP-BO",
            "bayesian": "GP-BO"
        }
        # Valid algorithm types for schema.
        valid_algorithms = {"TPE", "CMA-ES", "NSGA-II", "RandomSearch", "SAT_v3", "GP-BO"}
        algo = algo_map.get(algo.lower(), algo)
        # Fallback to TPE if still invalid.
        if algo not in valid_algorithms:
            algo = "TPE"

        optimization = {
            "algorithm": {
                "type": algo,
                "config": {}
            },
            "budget": {
                "max_trials": opt.get("n_trials", 100)
            },
            "canvas_position": {"x": 1300, "y": 150}
        }

        # Algorithm-specific config.
        if algo == "CMA-ES":
            optimization["algorithm"]["config"]["sigma0"] = opt.get("sigma0", 0.3)
        elif algo == "NSGA-II":
            optimization["algorithm"]["config"]["population_size"] = opt.get("population_size", 50)
        elif algo == "TPE":
            optimization["algorithm"]["config"]["n_startup_trials"] = opt.get("n_startup_trials", 10)

        # Seed.
        if "seed" in opt:
            optimization["algorithm"]["config"]["seed"] = opt["seed"]

        # Timeout (seconds in legacy configs) -> hours.
        if opt.get("timeout"):
            optimization["budget"]["max_time_hours"] = opt["timeout"] / 3600

        # SAT/surrogate settings.
        if "sat_settings" in config:
            sat = config["sat_settings"]
            optimization["surrogate"] = {
                "enabled": True,
                "type": "ensemble",
                "config": {
                    "n_models": sat.get("n_ensemble_models", 10),
                    "architecture": sat.get("hidden_dims", [256, 128]),
                    "train_every_n_trials": sat.get("retrain_frequency", 20),
                    "min_training_samples": sat.get("min_samples", 30)
                }
            }

        return optimization

    # =========================================================================
    # Workflow Migration
    # =========================================================================

    def _migrate_workflow(self, config: Dict) -> Dict:
        """Migrate SAT/turbo settings into a two-stage exploration workflow."""
        sat = config.get("sat_settings", {})

        exploration_trials = sat.get("min_samples", 30)
        total_trials = config.get("optimization", {}).get("n_trials", 100)

        return {
            "stages": [
                {
                    "id": "stage_exploration",
                    "name": "Design Space Exploration",
                    "algorithm": "RandomSearch",
                    "trials": exploration_trials,
                    "purpose": "Build initial training data for surrogate"
                },
                {
                    "id": "stage_optimization",
                    "name": "Surrogate-Assisted Optimization",
                    "algorithm": "SAT_v3",
                    # Clamp at 0 so n_trials < min_samples can't go negative.
                    "trials": max(0, total_trials - exploration_trials),
                    "purpose": "Neural-accelerated optimization"
                }
            ],
            "transitions": [
                {
                    "from": "stage_exploration",
                    "to": "stage_optimization",
                    "condition": f"trial_count >= {exploration_trials}"
                }
            ]
        }

    # =========================================================================
    # Canvas Edge Generation
    # =========================================================================

    def _generate_edges(self, spec: Dict) -> List[Dict]:
        """Generate canvas edges connecting nodes (DV -> model -> solver ->
        extractor -> objective/constraint -> optimization)."""
        edges = []

        # DVs -> model
        for dv in spec.get("design_variables", []):
            edges.append({"source": dv["id"], "target": "model"})

        # model -> solver
        edges.append({"source": "model", "target": "solver"})

        # solver -> extractors
        for ext in spec.get("extractors", []):
            edges.append({"source": "solver", "target": ext["id"]})

        # extractors -> objectives
        for obj in spec.get("objectives", []):
            ext_id = obj.get("source", {}).get("extractor_id")
            if ext_id:
                edges.append({"source": ext_id, "target": obj["id"]})

        # extractors -> constraints
        for con in spec.get("constraints", []):
            ext_id = con.get("source", {}).get("extractor_id")
            if ext_id:
                edges.append({"source": ext_id, "target": con["id"]})

        # objectives -> optimization
        for obj in spec.get("objectives", []):
            edges.append({"source": obj["id"], "target": "optimization"})

        # constraints -> optimization
        for con in spec.get("constraints", []):
            edges.append({"source": con["id"], "target": "optimization"})

        return edges
||||
|
||||
|
||||
# ============================================================================
|
||||
# Convenience Functions
|
||||
# ============================================================================
|
||||
|
||||
def migrate_config(
    old_config: Dict[str, Any],
    study_name: Optional[str] = None
) -> Dict[str, Any]:
    """Convert a legacy configuration dict into an AtomizerSpec v2.0 dict.

    Thin convenience wrapper around :class:`SpecMigrator`.

    Args:
        old_config: Legacy config dict.
        study_name: Optional override for the study name.

    Returns:
        The migrated AtomizerSpec v2.0 dict.
    """
    return SpecMigrator().migrate(old_config, study_name)
|
||||
|
||||
|
||||
def migrate_config_file(
    config_path: Union[str, Path],
    output_path: Optional[Union[str, Path]] = None
) -> Dict[str, Any]:
    """Convert a legacy config file into an AtomizerSpec v2.0 dict.

    Thin convenience wrapper around :class:`SpecMigrator`.

    Args:
        config_path: Path to the legacy config file.
        output_path: Where to write the migrated spec; not written if ``None``.

    Returns:
        The migrated AtomizerSpec v2.0 dict.
    """
    return SpecMigrator().migrate_file(config_path, output_path)
|
||||
@@ -11,6 +11,7 @@ Available extractors:
|
||||
- SPC Forces: extract_spc_forces, extract_total_reaction_force
|
||||
- Zernike: extract_zernike_from_op2, ZernikeExtractor (telescope mirrors)
|
||||
- Part Introspection: introspect_part (comprehensive NX .prt analysis)
|
||||
- Custom: CustomExtractorLoader for user-defined Python extractors
|
||||
|
||||
Phase 2 Extractors (2025-12-06):
|
||||
- Principal stress extraction (sigma1, sigma2, sigma3)
|
||||
@@ -25,6 +26,10 @@ Phase 3 Extractors (2025-12-06):
|
||||
|
||||
Phase 4 Extractors (2025-12-19):
|
||||
- Part Introspection (E12): Comprehensive .prt analysis (expressions, mass, materials, attributes, groups, features)
|
||||
|
||||
Phase 5 Extractors (2026-01-17):
|
||||
- Custom Extractor Loader: Dynamic loading and execution of user-defined Python extractors
|
||||
from AtomizerSpec v2.0 (sandboxed execution with security validation)
|
||||
"""
|
||||
|
||||
# Zernike extractor for telescope mirror optimization (standard Z-only method)
|
||||
@@ -119,6 +124,26 @@ from optimization_engine.extractors.introspect_part import (
|
||||
print_introspection_summary,
|
||||
)
|
||||
|
||||
# Custom extractor loader (Phase 5) - dynamic Python extractors from AtomizerSpec v2.0
|
||||
from optimization_engine.extractors.custom_extractor_loader import (
|
||||
CustomExtractor,
|
||||
CustomExtractorLoader,
|
||||
CustomExtractorContext,
|
||||
ExtractorSecurityError,
|
||||
ExtractorValidationError,
|
||||
load_custom_extractors,
|
||||
execute_custom_extractor,
|
||||
validate_custom_extractor,
|
||||
)
|
||||
|
||||
# Spec extractor builder - builds extractors from AtomizerSpec
|
||||
from optimization_engine.extractors.spec_extractor_builder import (
|
||||
SpecExtractorBuilder,
|
||||
build_extractors_from_spec,
|
||||
get_extractor_outputs,
|
||||
list_available_builtin_extractors,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Part mass & material (from .prt)
|
||||
'extract_part_mass_material',
|
||||
@@ -174,4 +199,18 @@ __all__ = [
|
||||
'get_expressions_dict',
|
||||
'get_expression_value',
|
||||
'print_introspection_summary',
|
||||
# Custom extractor loader (Phase 5)
|
||||
'CustomExtractor',
|
||||
'CustomExtractorLoader',
|
||||
'CustomExtractorContext',
|
||||
'ExtractorSecurityError',
|
||||
'ExtractorValidationError',
|
||||
'load_custom_extractors',
|
||||
'execute_custom_extractor',
|
||||
'validate_custom_extractor',
|
||||
# Spec extractor builder
|
||||
'SpecExtractorBuilder',
|
||||
'build_extractors_from_spec',
|
||||
'get_extractor_outputs',
|
||||
'list_available_builtin_extractors',
|
||||
]
|
||||
|
||||
Reference in New Issue
Block a user