refactor: Major reorganization of optimization_engine module structure
BREAKING CHANGE: Module paths have been reorganized for better maintainability. Backwards compatibility aliases with deprecation warnings are provided. New Structure: - core/ - Optimization runners (runner, intelligent_optimizer, etc.) - processors/ - Data processing - surrogates/ - Neural network surrogates - nx/ - NX/Nastran integration (solver, updater, session_manager) - study/ - Study management (creator, wizard, state, reset) - reporting/ - Reports and analysis (visualizer, report_generator) - config/ - Configuration management (manager, builder) - utils/ - Utilities (logger, auto_doc, etc.) - future/ - Research/experimental code Migration: - ~200 import changes across 125 files - All __init__.py files use lazy loading to avoid circular imports - Backwards compatibility layer supports old import paths with warnings - All existing functionality preserved To migrate existing code: OLD: from optimization_engine.nx_solver import NXSolver NEW: from optimization_engine.nx.solver import NXSolver OLD: from optimization_engine.runner import OptimizationRunner NEW: from optimization_engine.core.runner import OptimizationRunner 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -21,8 +21,8 @@ import importlib.util
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
|
||||
from optimization_engine.pynastran_research_agent import PyNastranResearchAgent, ExtractionPattern
|
||||
from optimization_engine.extractor_library import ExtractorLibrary, create_study_manifest
|
||||
from optimization_engine.future.pynastran_research_agent import PyNastranResearchAgent, ExtractionPattern
|
||||
from optimization_engine.extractors.extractor_library import ExtractorLibrary, create_study_manifest
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
402
optimization_engine/future/pynastran_research_agent.py
Normal file
402
optimization_engine/future/pynastran_research_agent.py
Normal file
@@ -0,0 +1,402 @@
|
||||
"""
|
||||
pyNastran Research Agent - Phase 3
|
||||
|
||||
Automated research and code generation for OP2 result extraction using pyNastran.
|
||||
|
||||
This agent:
|
||||
1. Searches pyNastran documentation
|
||||
2. Finds relevant APIs for extraction tasks
|
||||
3. Generates executable Python code for extractors
|
||||
4. Stores patterns in knowledge base
|
||||
|
||||
Author: Atomizer Development Team
|
||||
Version: 0.1.0 (Phase 3)
|
||||
Last Updated: 2025-01-16
|
||||
"""
|
||||
|
||||
import json
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
|
||||
|
||||
|
||||
@dataclass
class ExtractionPattern:
    """Represents a learned pattern for OP2 extraction."""
    # Short identifier; also used as the knowledge-base JSON file name.
    name: str
    # Human-readable summary of what the pattern extracts.
    description: str
    element_type: Optional[str]  # e.g., 'CBAR', 'CQUAD4', None for general
    result_type: str  # 'force', 'stress', 'displacement', 'strain'
    # Complete Python source of the generated extractor function.
    code_template: str
    api_path: str  # e.g., 'model.cbar_force[subcase]'
    data_structure: str  # Description of data array structure
    examples: List[str]  # Example usage
|
||||
|
||||
|
||||
class PyNastranResearchAgent:
|
||||
"""
|
||||
Research agent for pyNastran documentation and code generation.
|
||||
|
||||
Uses a combination of:
|
||||
- Pre-learned patterns from documentation
|
||||
- WebFetch for dynamic lookup (future)
|
||||
- Knowledge base caching
|
||||
"""
|
||||
|
||||
def __init__(self, knowledge_base_path: Optional[Path] = None):
    """
    Initialize the research agent.

    Args:
        knowledge_base_path: Path to store learned patterns
    """
    # Default location: <package root>/knowledge_base/pynastran_patterns
    default_root = Path(__file__).parent.parent / "knowledge_base" / "pynastran_patterns"
    self.knowledge_base_path = Path(knowledge_base_path) if knowledge_base_path is not None else default_root
    # Ensure the storage directory exists before any save/load operation.
    self.knowledge_base_path.mkdir(parents=True, exist_ok=True)

    # Seed the in-memory pattern table with the built-in,
    # documentation-derived patterns.
    self.patterns = self._initialize_core_patterns()
|
||||
|
||||
def _initialize_core_patterns(self) -> Dict[str, ExtractionPattern]:
    """Initialize core extraction patterns from pyNastran docs."""
    # Each code_template below is a complete, standalone extractor function
    # that generate_extractor_code() later embeds in a generated module.
    patterns = {}

    # Displacement extraction
    patterns['displacement'] = ExtractionPattern(
        name='displacement',
        description='Extract displacement results',
        element_type=None,
        result_type='displacement',
        code_template='''def extract_displacement(op2_file: Path, subcase: int = 1):
    """Extract displacement results from OP2 file."""
    from pyNastran.op2.op2 import OP2
    import numpy as np

    model = OP2()
    model.read_op2(str(op2_file))

    disp = model.displacements[subcase]
    itime = 0  # static case

    # Extract translation components
    txyz = disp.data[itime, :, :3]  # [tx, ty, tz]

    # Calculate total displacement
    total_disp = np.linalg.norm(txyz, axis=1)
    max_disp = np.max(total_disp)

    # Get node info
    node_ids = [nid for (nid, grid_type) in disp.node_gridtype]
    max_disp_node = node_ids[np.argmax(total_disp)]

    return {
        'max_displacement': float(max_disp),
        'max_disp_node': int(max_disp_node),
        'max_disp_x': float(np.max(np.abs(txyz[:, 0]))),
        'max_disp_y': float(np.max(np.abs(txyz[:, 1]))),
        'max_disp_z': float(np.max(np.abs(txyz[:, 2])))
    }''',
        api_path='model.displacements[subcase]',
        data_structure='data[itime, :, :6] where :6=[tx, ty, tz, rx, ry, rz]',
        examples=['max_disp = extract_displacement(Path("results.op2"))']
    )

    # Stress extraction (solid elements)
    patterns['solid_stress'] = ExtractionPattern(
        name='solid_stress',
        description='Extract stress from solid elements (CTETRA, CHEXA)',
        element_type='CTETRA',
        result_type='stress',
        code_template='''def extract_solid_stress(op2_file: Path, subcase: int = 1, element_type: str = 'ctetra'):
    """Extract stress from solid elements."""
    from pyNastran.op2.op2 import OP2
    import numpy as np

    model = OP2()
    model.read_op2(str(op2_file))

    # Get stress object for element type
    # In pyNastran, stress is stored in model.op2_results.stress
    stress_attr = f"{element_type}_stress"

    if not hasattr(model, 'op2_results') or not hasattr(model.op2_results, 'stress'):
        raise ValueError(f"No stress results in OP2")

    stress_obj = model.op2_results.stress
    if not hasattr(stress_obj, stress_attr):
        raise ValueError(f"No {element_type} stress results in OP2")

    stress = getattr(stress_obj, stress_attr)[subcase]
    itime = 0

    # Extract von Mises if available
    if stress.is_von_mises:  # Property, not method
        von_mises = stress.data[itime, :, 9]  # Column 9 is von Mises
        max_stress = float(np.max(von_mises))

        # Get element info
        element_ids = [eid for (eid, node) in stress.element_node]
        max_stress_elem = element_ids[np.argmax(von_mises)]

        return {
            'max_von_mises': max_stress,
            'max_stress_element': int(max_stress_elem)
        }
    else:
        raise ValueError("von Mises stress not available")''',
        api_path='model.ctetra_stress[subcase] or model.chexa_stress[subcase]',
        data_structure='data[itime, :, 10] where column 9=von_mises',
        examples=['stress = extract_solid_stress(Path("results.op2"), element_type="ctetra")']
    )

    # CBAR force extraction
    patterns['cbar_force'] = ExtractionPattern(
        name='cbar_force',
        description='Extract forces from CBAR elements',
        element_type='CBAR',
        result_type='force',
        code_template='''def extract_cbar_force(op2_file: Path, subcase: int = 1, direction: str = 'Z'):
    """
    Extract forces from CBAR elements.

    Args:
        op2_file: Path to OP2 file
        subcase: Subcase ID
        direction: Force direction ('X', 'Y', 'Z', 'axial', 'torque')

    Returns:
        Dict with force statistics
    """
    from pyNastran.op2.op2 import OP2
    import numpy as np

    model = OP2()
    model.read_op2(str(op2_file))

    if not hasattr(model, 'cbar_force'):
        raise ValueError("No CBAR force results in OP2")

    force = model.cbar_force[subcase]
    itime = 0

    # CBAR force data structure:
    # [bending_moment_a1, bending_moment_a2,
    #  bending_moment_b1, bending_moment_b2,
    #  shear1, shear2, axial, torque]

    direction_map = {
        'shear1': 4,
        'shear2': 5,
        'axial': 6,
        'Z': 6,  # Commonly axial is Z direction
        'torque': 7
    }

    col_idx = direction_map.get(direction, direction_map.get(direction.lower(), 6))
    forces = force.data[itime, :, col_idx]

    return {
        f'max_{direction}_force': float(np.max(np.abs(forces))),
        f'avg_{direction}_force': float(np.mean(np.abs(forces))),
        f'min_{direction}_force': float(np.min(np.abs(forces))),
        'forces_array': forces.tolist()
    }''',
        api_path='model.cbar_force[subcase]',
        data_structure='data[ntimes, nelements, 8] where 8=[bm_a1, bm_a2, bm_b1, bm_b2, shear1, shear2, axial, torque]',
        examples=['forces = extract_cbar_force(Path("results.op2"), direction="Z")']
    )

    return patterns
|
||||
|
||||
def research_extraction(self, request: Dict[str, Any]) -> ExtractionPattern:
    """
    Research and find/generate extraction pattern for a request.

    Args:
        request: Dict with:
            - action: e.g., 'extract_1d_element_forces'
            - domain: e.g., 'result_extraction'
            - params: {'element_types': ['CBAR'], 'result_type': 'element_force', 'direction': 'Z'}

    Returns:
        ExtractionPattern with code template
    """
    action_lower = request.get('action', '').lower()
    params = request.get('params', {})

    # Displacement requests map straight to the canonical displacement pattern.
    if 'displacement' in action_lower:
        return self.patterns['displacement']

    # Stress: the solid-element pattern is currently the only (and default)
    # choice; a plate-stress pattern could be selected here later.
    if 'stress' in action_lower:
        return self.patterns['solid_stress']

    # Forces: CBAR / 1D element forces have a dedicated pattern.
    wants_force = 'force' in action_lower or 'element_force' in params.get('result_type', '')
    if wants_force:
        known_elements = params.get('element_types', [])
        if 'CBAR' in known_elements or '1d' in action_lower:
            return self.patterns['cbar_force']

    # Fallback: synthesize a generic stub the caller must customize.
    return self._generate_generic_pattern(request)
|
||||
|
||||
def _generate_generic_pattern(self, request: Dict[str, Any]) -> ExtractionPattern:
    """Generate a generic extraction pattern as fallback."""
    # Used when no pre-learned pattern matches the request; the emitted
    # template is a stub the engineer is expected to customize by hand.
    return ExtractionPattern(
        name='generic_extraction',
        description=f"Generic extraction for {request.get('action', 'unknown')}",
        element_type=None,
        result_type='unknown',
        code_template='''def extract_generic(op2_file: Path):
    """Generic OP2 extraction - needs customization."""
    from pyNastran.op2.op2 import OP2

    model = OP2()
    model.read_op2(str(op2_file))

    # TODO: Customize extraction based on requirements
    # Available: model.displacements, model.ctetra_stress, etc.
    # Use model.get_op2_stats() to see available results

    return {'result': None}''',
        api_path='model.<result_type>[subcase]',
        data_structure='Varies by result type',
        examples=['# Needs customization']
    )
|
||||
|
||||
def generate_extractor_code(self, request: Dict[str, Any]) -> str:
    """
    Generate complete extractor code for a request.

    Args:
        request: Extraction request from Phase 2.7 LLM

    Returns:
        Complete Python code as string
    """
    pattern = self.research_extraction(request)

    # Generate module header
    description = request.get('description', pattern.description)

    # Every template starts with 'def <name>(...' — recover <name> so the
    # generated __main__ block can call it.
    function_name = pattern.code_template.split('(')[0].split()[-1]

    # Doubled braces ({{...}}) become literal braces in the emitted module.
    # FIX: the usage line is now emitted as an f-string so {sys.argv[0]} is
    # interpolated when the generated extractor runs (previously it printed
    # the placeholder verbatim).
    code = f'''"""
{description}

Auto-generated by Atomizer Phase 3 - pyNastran Research Agent

Pattern: {pattern.name}
Element Type: {pattern.element_type or 'General'}
Result Type: {pattern.result_type}
API: {pattern.api_path}
"""

from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2


{pattern.code_template}


if __name__ == '__main__':
    # Example usage
    import sys
    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = {function_name}(op2_file)
        print(f"Extraction result: {{result}}")
    else:
        print(f"Usage: python {{sys.argv[0]}} <op2_file>")
'''

    return code
|
||||
|
||||
def save_pattern(self, pattern: ExtractionPattern):
    """Save a pattern to the knowledge base.

    Serializes the dataclass as JSON to <knowledge_base_path>/<name>.json,
    overwriting any previous version of the same pattern.

    Args:
        pattern: The pattern to persist.
    """
    pattern_file = self.knowledge_base_path / f"{pattern.name}.json"

    # asdict() produces exactly the hand-built field dict the previous code
    # maintained, and stays in sync automatically if fields are added to
    # ExtractionPattern later.
    with open(pattern_file, 'w') as f:
        json.dump(asdict(pattern), f, indent=2)
|
||||
|
||||
def load_pattern(self, name: str) -> Optional[ExtractionPattern]:
    """Load a pattern from the knowledge base, or return None if absent."""
    candidate = self.knowledge_base_path / f"{name}.json"
    if not candidate.exists():
        return None

    # Patterns are stored as flat JSON objects mirroring the dataclass fields.
    with open(candidate, 'r') as fh:
        payload = json.load(fh)
    return ExtractionPattern(**payload)
|
||||
|
||||
|
||||
def main():
    """Test the pyNastran research agent."""
    banner = "=" * 80

    print(banner)
    print("Phase 3: pyNastran Research Agent Test")
    print(banner)
    print()

    agent = PyNastranResearchAgent()

    # Test request: CBAR force extraction (from Phase 2.7 example)
    test_request = {
        "action": "extract_1d_element_forces",
        "domain": "result_extraction",
        "description": "Extract element forces from CBAR in Z direction from OP2",
        "params": {
            "element_types": ["CBAR"],
            "result_type": "element_force",
            "direction": "Z"
        }
    }

    print("Test Request:")
    print(f"  Action: {test_request['action']}")
    print(f"  Description: {test_request['description']}")
    print()

    print("1. Researching extraction pattern...")
    pattern = agent.research_extraction(test_request)
    print(f"   Found pattern: {pattern.name}")
    print(f"   API path: {pattern.api_path}")
    print()

    print("2. Generating extractor code...")
    code = agent.generate_extractor_code(test_request)
    print()

    print(banner)
    print("Generated Extractor Code:")
    print(banner)
    print(code)

    # Save to file
    output_file = Path("generated_extractors") / "cbar_force_extractor.py"
    output_file.parent.mkdir(exist_ok=True)
    with open(output_file, 'w') as f:
        f.write(code)

    print()
    print(f"[OK] Saved to: {output_file}")
|
||||
|
||||
|
||||
# Allow running this module directly as a smoke test.
if __name__ == '__main__':
    main()
|
||||
1384
optimization_engine/future/research_agent.py
Normal file
1384
optimization_engine/future/research_agent.py
Normal file
File diff suppressed because it is too large
Load Diff
332
optimization_engine/future/step_classifier.py
Normal file
332
optimization_engine/future/step_classifier.py
Normal file
@@ -0,0 +1,332 @@
|
||||
"""
|
||||
Step Classifier - Phase 2.6
|
||||
|
||||
Classifies workflow steps into:
|
||||
1. Engineering Features - Complex FEA/CAE operations needing research/documentation
|
||||
2. Inline Calculations - Simple math operations to generate on-the-fly
|
||||
3. Post-Processing Hooks - Middleware scripts between engineering steps
|
||||
|
||||
Author: Atomizer Development Team
|
||||
Version: 0.1.0 (Phase 2.6)
|
||||
Last Updated: 2025-01-16
|
||||
"""
|
||||
|
||||
from typing import Dict, List, Any, Optional
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
import re
|
||||
|
||||
|
||||
@dataclass
class StepClassification:
    """Classification result for a workflow step."""
    step_type: str  # 'engineering_feature', 'inline_calculation', 'post_processing_hook'
    complexity: str  # 'simple', 'moderate', 'complex'
    # True when the step needs external research (docs, examples) before codegen.
    requires_research: bool
    # True when reference documentation must be gathered for the step.
    requires_documentation: bool
    # True when code for the step can be generated automatically without research.
    auto_generate: bool
    # Human-readable explanation of why this classification was chosen.
    reasoning: str
|
||||
|
||||
|
||||
class StepClassifier:
|
||||
"""
|
||||
Intelligently classifies workflow steps to determine if they need:
|
||||
- Full feature engineering (FEA/CAE operations)
|
||||
- Inline code generation (simple math)
|
||||
- Post-processing hooks (middleware)
|
||||
"""
|
||||
|
||||
def __init__(self):
    """Build the keyword tables used by the classification heuristics."""
    # Engineering operations that require research/documentation.
    # Keys are action names; values list the known variants/targets.
    self.engineering_operations = {
        # FEA Result Extraction
        'extract_result': ['displacement', 'stress', 'strain', 'reaction_force',
                           'element_force', 'temperature', 'modal', 'buckling'],

        # FEA Property Modifications
        'update_fea_property': ['cbush_stiffness', 'pcomp_layup', 'mat1_properties',
                                'pshell_thickness', 'pbeam_properties', 'contact_stiffness'],

        # Geometry/CAD Operations
        'modify_geometry': ['extrude', 'revolve', 'boolean', 'fillet', 'chamfer'],
        'read_expression': ['part_expression', 'assembly_expression'],

        # Simulation Setup
        'run_analysis': ['sol101', 'sol103', 'sol106', 'sol111', 'sol400'],
        'create_material': ['mat1', 'mat8', 'mat9', 'physical_material'],
        'apply_loads': ['force', 'moment', 'pressure', 'thermal_load'],
        'create_mesh': ['tetra', 'hex', 'shell', 'beam'],
    }

    # Simple mathematical operations (no feature needed)
    self.simple_math_operations = {
        'average', 'mean', 'max', 'maximum', 'min', 'minimum',
        'sum', 'total', 'count', 'ratio', 'percentage',
        'compare', 'difference', 'delta', 'absolute',
        'normalize', 'scale', 'round', 'floor', 'ceil'
    }

    # Statistical operations (still simple, but slightly more complex)
    self.statistical_operations = {
        'std', 'stddev', 'variance', 'median', 'mode',
        'percentile', 'quartile', 'range', 'iqr'
    }

    # Post-processing indicators (matched against the raw request text)
    self.post_processing_indicators = {
        'custom objective', 'metric', 'criteria', 'evaluation',
        'transform', 'filter', 'aggregate', 'combine'
    }
|
||||
|
||||
def classify_step(self, action: str, domain: str, params: Dict[str, Any],
                  request_context: str = "") -> StepClassification:
    """
    Classify a workflow step into engineering feature, inline calc, or hook.

    Args:
        action: The action type (e.g., 'extract_result', 'update_parameters')
        domain: The domain (e.g., 'result_extraction', 'optimization')
        params: Step parameters
        request_context: Original user request for context

    Returns:
        StepClassification with type and reasoning
    """
    request_lower = request_context.lower()

    # Heuristics run from most to least specific; first match wins.
    if self._is_engineering_operation(action, params):
        return StepClassification(
            step_type='engineering_feature',
            complexity='complex',
            requires_research=True,
            requires_documentation=True,
            auto_generate=False,
            reasoning=f"FEA/CAE operation '{action}' requires specialized knowledge and documentation",
        )

    if self._is_simple_calculation(action, params, request_lower):
        return StepClassification(
            step_type='inline_calculation',
            complexity='simple',
            requires_research=False,
            requires_documentation=False,
            auto_generate=True,
            reasoning="Simple mathematical operation that can be generated inline",
        )

    if self._is_post_processing_hook(action, params, request_lower):
        return StepClassification(
            step_type='post_processing_hook',
            complexity='moderate',
            requires_research=False,
            requires_documentation=False,
            auto_generate=True,
            reasoning="Post-processing calculation between FEA steps",
        )

    # Known standard workflow actions are features but likely already exist.
    if action in ('identify_parameters', 'update_parameters', 'optimize'):
        return StepClassification(
            step_type='engineering_feature',
            complexity='moderate',
            requires_research=False,  # May already exist
            requires_documentation=True,
            auto_generate=False,
            reasoning="Standard optimization workflow step",
        )

    # Default: treat as engineering feature to be safe
    return StepClassification(
        step_type='engineering_feature',
        complexity='moderate',
        requires_research=True,
        requires_documentation=True,
        auto_generate=False,
        reasoning="Unknown action type, treating as engineering feature",
    )
|
||||
|
||||
def _is_engineering_operation(self, action: str, params: Dict[str, Any]) -> bool:
    """Check if this is a complex engineering operation.

    Returns True when the action name is a known engineering operation, or
    when the params carry any FEA-specific key.
    """
    # Known engineering action names (keys of the operations table).
    if action in self.engineering_operations:
        return True

    # Any FEA-specific parameter key marks the step as engineering.
    fea_indicators = (
        'result_type', 'solver', 'element_type', 'material_type',
        'mesh_type', 'load_type', 'subcase', 'solution',
    )
    if any(indicator in params for indicator in fea_indicators):
        return True

    # NOTE(review): the original code additionally compared
    # params['result_type'] against a list of engineering result names, but
    # that branch was unreachable — 'result_type' is already an FEA indicator
    # above, so any step carrying that key returns True before its value is
    # inspected. The dead branch has been removed (behavior unchanged); if
    # value-specific filtering was actually intended, remove 'result_type'
    # from fea_indicators instead.
    return False
|
||||
|
||||
def _is_simple_calculation(self, action: str, params: Dict[str, Any],
                           request_context: str) -> bool:
    """Check if this is a simple mathematical calculation."""
    # Tokenize the action name and look for math/statistics keywords.
    tokens = set(action.lower().split('_'))
    if tokens & self.simple_math_operations:
        return True
    if tokens & self.statistical_operations:
        return True

    # Otherwise look for calculation phrasing in the request text itself.
    calc_patterns = (
        r'\b(calculate|compute|find)\s+(average|mean|max|min|sum)\b',
        r'\b(average|mean)\s+of\b',
        r'\bfind\s+the\s+(maximum|minimum)\b',
        r'\bcompare\s+.+\s+to\s+',
    )
    return any(re.search(pattern, request_context) for pattern in calc_patterns)
|
||||
|
||||
def _is_post_processing_hook(self, action: str, params: Dict[str, Any],
                             request_context: str) -> bool:
    """Check if this is a post-processing hook between steps."""
    # Only requests mentioning a post-processing keyword qualify at all.
    if not any(word in request_context for word in self.post_processing_indicators):
        return False

    # Combining multiple derived quantities is a sign of post-processing
    # middleware between engineering steps.
    if 'average' in request_context and 'maximum' in request_context:
        return True
    if 'compare' in request_context:
        return True
    if 'assign' in request_context and 'metric' in request_context:
        return True

    return False
|
||||
|
||||
def classify_workflow(self, workflow_steps: List[Any],
                      request_context: str = "") -> Dict[str, List[Any]]:
    """
    Classify all steps in a workflow.

    Returns:
        {
            'engineering_features': [...],
            'inline_calculations': [...],
            'post_processing_hooks': [...]
        }
    """
    buckets: Dict[str, List[Any]] = {
        'engineering_features': [],
        'inline_calculations': [],
        'post_processing_hooks': []
    }
    # Dispatch table: classification step_type -> output bucket key.
    bucket_for = {
        'engineering_feature': 'engineering_features',
        'inline_calculation': 'inline_calculations',
        'post_processing_hook': 'post_processing_hooks',
    }

    for step in workflow_steps:
        result = self.classify_step(step.action, step.domain, step.params,
                                    request_context)
        entry = {'step': step, 'classification': result}
        key = bucket_for.get(result.step_type)
        if key is not None:
            buckets[key].append(entry)

    return buckets
|
||||
|
||||
def get_summary(self, classified_workflow: Dict[str, List[Any]]) -> str:
    """Get human-readable summary of classification."""
    out: List[str] = [
        "Workflow Classification Summary",
        "=" * 80,
        "",
    ]

    # Engineering features (need research): action, domain and reasoning.
    eng_features = classified_workflow['engineering_features']
    out.append(f"Engineering Features (Need Research): {len(eng_features)}")
    for item in eng_features:
        step = item['step']
        classification = item['classification']
        out.append(f"  - {step.action} ({step.domain})")
        out.append(f"    Reason: {classification.reasoning}")
    out.append("")

    # Inline calculations: action plus raw params.
    inline_calcs = classified_workflow['inline_calculations']
    out.append(f"Inline Calculations (Auto-Generate): {len(inline_calcs)}")
    for item in inline_calcs:
        out.append(f"  - {item['step'].action}: {item['step'].params}")
    out.append("")

    # Post-processing hooks: action plus raw params.
    hooks = classified_workflow['post_processing_hooks']
    out.append(f"Post-Processing Hooks (Auto-Generate): {len(hooks)}")
    for item in hooks:
        out.append(f"  - {item['step'].action}: {item['step'].params}")

    return "\n".join(out)
|
||||
|
||||
|
||||
def main():
    """Test the step classifier."""
    from optimization_engine.future.workflow_decomposer import WorkflowDecomposer

    divider = "=" * 80
    print("Step Classifier Test")
    print(divider)
    print()

    # Test with CBUSH optimization request
    request = """I want to extract forces in direction Z of all the 1D elements and find the average of it,
then find the maximum value and compare it to the average, then assign it to a objective metric that needs to be minimized."""

    decomposer = WorkflowDecomposer()
    classifier = StepClassifier()

    print("Request:")
    print(request)
    print()

    # Decompose workflow
    steps = decomposer.decompose(request)

    print("Workflow Steps:")
    for i, step in enumerate(steps, 1):
        print(f"{i}. {step.action} ({step.domain})")
    print()

    # Classify steps
    classified = classifier.classify_workflow(steps, request)

    # Display summary
    print(classifier.get_summary(classified))
|
||||
|
||||
|
||||
# Allow running this module directly as a smoke test.
if __name__ == '__main__':
    main()
|
||||
255
optimization_engine/future/targeted_research_planner.py
Normal file
255
optimization_engine/future/targeted_research_planner.py
Normal file
@@ -0,0 +1,255 @@
|
||||
"""
|
||||
Targeted Research Planner
|
||||
|
||||
Creates focused research plans that target ONLY the actual knowledge gaps,
|
||||
leveraging similar existing capabilities when available.
|
||||
|
||||
Author: Atomizer Development Team
|
||||
Version: 0.1.0 (Phase 2.5)
|
||||
Last Updated: 2025-01-16
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Any
|
||||
from pathlib import Path
|
||||
|
||||
from optimization_engine.config.capability_matcher import CapabilityMatch, StepMatch
|
||||
|
||||
|
||||
class TargetedResearchPlanner:
|
||||
"""Creates research plan focused on actual gaps."""
|
||||
|
||||
def __init__(self):
    # No configuration or state is needed yet; planning is purely input-driven.
    pass
|
||||
|
||||
def plan(self, capability_match: CapabilityMatch) -> List[Dict[str, Any]]:
    """
    Create targeted research plan for missing capabilities.

    For gap='strain_from_op2', similar_to='stress_from_op2':

    Research Plan:
    1. Read existing op2_extractor_example.py to understand pattern
    2. Search pyNastran docs for strain extraction API
    3. If not found, ask user for strain extraction example
    4. Generate extract_strain() function following same pattern as extract_stress()
    """
    # Flatten the per-gap plans into one ordered research plan; an empty
    # unknown_steps list naturally yields an empty plan.
    return [
        research_step
        for gap in capability_match.unknown_steps
        for research_step in self._plan_for_gap(gap)
    ]
|
||||
|
||||
def _plan_for_gap(self, step_match: StepMatch) -> List[Dict[str, Any]]:
    """Create research plan for a single gap.

    Steps are ordered by 'priority' (1 = try first): study similar code,
    search the knowledge base, search library docs, then fall back to
    asking the user for an example.
    """
    step = step_match.step
    similar = step_match.similar_capabilities

    plan_steps = []

    # If we have similar capabilities, start by studying them
    if similar:
        plan_steps.append({
            'action': 'read_existing_code',
            'description': f'Study existing {similar[0]} implementation to understand pattern',
            'details': {
                'capability': similar[0],
                'category': step.domain,
                'purpose': f'Learn pattern for {step.action}'
            },
            'expected_confidence': 0.7,
            'priority': 1
        })

    # Search knowledge base for previous similar work
    plan_steps.append({
        'action': 'search_knowledge_base',
        'description': f'Search for previous {step.domain} work',
        'details': {
            'query': f"{step.domain} {step.action}",
            'required_params': step.params
        },
        # Knowledge-base hits are more likely when a similar capability exists.
        'expected_confidence': 0.8 if similar else 0.5,
        'priority': 2
    })

    # For result extraction, search pyNastran docs
    if step.domain == 'result_extraction':
        result_type = step.params.get('result_type', '')
        plan_steps.append({
            'action': 'search_pynastran_docs',
            'description': f'Search pyNastran documentation for {result_type} extraction',
            'details': {
                'query': f'pyNastran OP2 {result_type} extraction',
                'library': 'pyNastran',
                'expected_api': f'op2.{result_type}s or similar'
            },
            'expected_confidence': 0.85,
            'priority': 3
        })

    # For simulation, search NX docs
    elif step.domain == 'simulation':
        solver = step.params.get('solver', '')
        plan_steps.append({
            'action': 'query_nx_docs',
            'description': f'Search NX documentation for {solver}',
            'details': {
                'query': f'NX Nastran {solver} solver',
                'solver_type': solver
            },
            'expected_confidence': 0.85,
            'priority': 3
        })

    # As fallback, ask user for example
    plan_steps.append({
        'action': 'ask_user_for_example',
        'description': f'Request example from user for {step.action}',
        'details': {
            'prompt': f"Could you provide an example of {step.action.replace('_', ' ')}?",
            'suggested_file_types': self._get_suggested_file_types(step.domain),
            'params_needed': step.params
        },
        'expected_confidence': 0.95,  # User examples have high confidence
        'priority': 4
    })

    return plan_steps
|
||||
|
||||
def _get_suggested_file_types(self, domain: str) -> List[str]:
|
||||
"""Get suggested file types for user examples based on domain."""
|
||||
suggestions = {
|
||||
'materials': ['.xml', '.mtl'],
|
||||
'geometry': ['.py', '.prt'],
|
||||
'loads_bc': ['.py', '.xml'],
|
||||
'mesh': ['.py', '.dat'],
|
||||
'result_extraction': ['.py', '.txt'],
|
||||
'optimization': ['.py', '.json']
|
||||
}
|
||||
return suggestions.get(domain, ['.py', '.txt'])
|
||||
|
||||
def get_plan_summary(self, plan: List[Dict[str, Any]]) -> str:
    """Render a research plan as a human-readable report string.

    An empty plan yields a short "nothing to do" message; otherwise the
    report lists every step (grouped by action, with a divider when a new
    action group begins) followed by a strategic risk summary.
    """
    if not plan:
        return "No research needed - all capabilities are known!"

    report = [
        "Targeted Research Plan",
        "=" * 80,
        "",
        f"Research steps needed: {len(plan)}",
        "",
    ]

    previous_action = None
    for index, entry in enumerate(plan, 1):
        report.append(f"\nStep {index}: {entry['description']}")
        # A divider is emitted only at the start of a new action group.
        if entry['action'] != previous_action:
            previous_action = entry['action']
            report.append("-" * 80)

        report.append(f" Action: {entry['action']}")

        if 'details' in entry:
            details = entry['details']
            if 'capability' in details:
                report.append(f" Study: {details['capability']}")
            if 'query' in details:
                report.append(f" Query: \"{details['query']}\"")
            if 'prompt' in details:
                report.append(f" Prompt: \"{details['prompt']}\"")

        report.append(f" Expected confidence: {entry['expected_confidence']:.0%}")

    report.append("")
    report.append("=" * 80)

    # Strategic summary: the plan is lower-risk when it can lean on
    # existing code instead of researching a new domain from scratch.
    report.append("\nResearch Strategy:")
    report.append("-" * 80)

    if any(item['action'] == 'read_existing_code' for item in plan):
        report.append(" - Will adapt from existing similar code patterns")
        report.append(" - Lower risk: Can follow proven implementation")
    else:
        report.append(" - New domain: Will need to research from scratch")
        report.append(" - Higher risk: No existing patterns to follow")

    return "\n".join(report)
|
||||
|
||||
|
||||
def main():
    """Manual smoke test: run the full Phase 2.5 pipeline on a sample request."""
    from optimization_engine.utils.codebase_analyzer import CodebaseCapabilityAnalyzer
    from optimization_engine.future.workflow_decomposer import WorkflowDecomposer
    from optimization_engine.config.capability_matcher import CapabilityMatcher

    banner = "=" * 80
    rule = "-" * 80

    print("Targeted Research Planner Test")
    print(banner)
    print()

    # Wire up the pipeline components.
    capability_analyzer = CodebaseCapabilityAnalyzer()
    workflow_decomposer = WorkflowDecomposer()
    capability_matcher = CapabilityMatcher(capability_analyzer)
    research_planner = TargetedResearchPlanner()

    # Representative strain-optimization request.
    sample_request = (
        "I want to evaluate strain on a part with sol101 and optimize this "
        "(minimize) using iterations and optuna to lower it varying all my "
        "geometry parameters that contains v_ in its expression"
    )

    print("Request:")
    print(sample_request)
    print()

    # Decompose -> match -> plan.
    print("Phase 2.5 Pipeline:")
    print(rule)
    print("1. Decompose workflow...")
    workflow_steps = workflow_decomposer.decompose(sample_request)
    print(f" Found {len(workflow_steps)} workflow steps")

    print("\n2. Match to codebase capabilities...")
    capability_match = capability_matcher.match(workflow_steps)
    print(f" Known: {len(capability_match.known_steps)}/{len(workflow_steps)}")
    print(f" Unknown: {len(capability_match.unknown_steps)}/{len(workflow_steps)}")
    print(f" Overall confidence: {capability_match.overall_confidence:.0%}")

    print("\n3. Create targeted research plan...")
    research_plan = research_planner.plan(capability_match)
    print(f" Generated {len(research_plan)} research steps")

    print("\n" + banner)
    print()

    print(research_planner.get_plan_summary(research_plan))

    # Show exactly which capabilities require research.
    print("\n\nWhat will be researched:")
    print(rule)
    for unmatched in capability_match.unknown_steps:
        workflow_step = unmatched.step
        print(f" Missing: {workflow_step.action} ({workflow_step.domain})")
        print(f" Required params: {workflow_step.params}")
        if unmatched.similar_capabilities:
            print(f" Can adapt from: {', '.join(unmatched.similar_capabilities)}")
        print()

    print("\nWhat will NOT be researched (already known):")
    print(rule)
    for matched in capability_match.known_steps:
        workflow_step = matched.step
        print(f" - {workflow_step.action} ({workflow_step.domain})")
    print()


if __name__ == '__main__':
    main()
|
||||
525
optimization_engine/future/workflow_decomposer.py
Normal file
525
optimization_engine/future/workflow_decomposer.py
Normal file
@@ -0,0 +1,525 @@
|
||||
"""
|
||||
Workflow Decomposer
|
||||
|
||||
Breaks complex user requests into atomic workflow steps that can be matched
|
||||
against existing codebase capabilities.
|
||||
|
||||
IMPROVED VERSION: Handles multi-objective optimization, constraints, and complex requests.
|
||||
|
||||
Author: Atomizer Development Team
|
||||
Version: 0.2.0 (Phase 2.5 - Improved)
|
||||
Last Updated: 2025-01-16
|
||||
"""
|
||||
|
||||
import re
|
||||
from typing import List, Dict, Any, Set
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
class WorkflowStep:
    """Represents a single atomic step in a workflow.

    A step is the smallest unit the decomposer emits: one action, in one
    capability domain, with whatever parameters could be extracted from
    the user's request.
    """
    # Verb-style identifier of what to do (e.g. 'run_analysis', 'optimize').
    action: str
    # Capability domain the action belongs to (e.g. 'simulation', 'geometry').
    domain: str
    # Free-form parameters extracted from the request for this action.
    params: Dict[str, Any]
    # Ordering hint; decompose() sorts steps ascending by this value.
    priority: int = 0
|
||||
|
||||
|
||||
class WorkflowDecomposer:
|
||||
"""Breaks complex requests into atomic workflow steps."""
|
||||
|
||||
def __init__(self):
|
||||
# Extended result type mapping
|
||||
self.result_types = {
|
||||
'displacement': 'displacement',
|
||||
'deformation': 'displacement',
|
||||
'stress': 'stress',
|
||||
'von mises': 'stress',
|
||||
'strain': 'strain',
|
||||
'modal': 'modal',
|
||||
'mode': 'modal',
|
||||
'eigenvalue': 'modal',
|
||||
'frequency': 'modal',
|
||||
'temperature': 'temperature',
|
||||
'thermal': 'temperature',
|
||||
'reaction': 'reaction_force',
|
||||
'reaction force': 'reaction_force',
|
||||
'nodal reaction': 'reaction_force',
|
||||
'force': 'reaction_force',
|
||||
'mass': 'mass',
|
||||
'weight': 'mass',
|
||||
'volume': 'volume'
|
||||
}
|
||||
|
||||
# Solver type mapping
|
||||
self.solver_types = {
|
||||
'sol101': 'SOL101',
|
||||
'sol 101': 'SOL101',
|
||||
'static': 'SOL101',
|
||||
'sol103': 'SOL103',
|
||||
'sol 103': 'SOL103',
|
||||
'modal': 'SOL103',
|
||||
'sol106': 'SOL106',
|
||||
'sol 106': 'SOL106',
|
||||
'nonlinear': 'SOL106',
|
||||
'sol105': 'SOL105',
|
||||
'buckling': 'SOL105'
|
||||
}
|
||||
|
||||
def decompose(self, user_request: str) -> List[WorkflowStep]:
|
||||
"""
|
||||
Break user request into atomic workflow steps.
|
||||
|
||||
Handles:
|
||||
- Multi-objective optimization
|
||||
- Constraints
|
||||
- Multiple result extractions
|
||||
- Custom expressions
|
||||
- Parameter filtering
|
||||
"""
|
||||
steps = []
|
||||
request_lower = user_request.lower()
|
||||
|
||||
# Check if this is an optimization request
|
||||
is_optimization = self._is_optimization_request(request_lower)
|
||||
|
||||
if is_optimization:
|
||||
steps = self._decompose_optimization_workflow(user_request, request_lower)
|
||||
else:
|
||||
steps = self._decompose_simple_workflow(user_request, request_lower)
|
||||
|
||||
# Sort by priority
|
||||
steps.sort(key=lambda s: s.priority)
|
||||
|
||||
return steps
|
||||
|
||||
def _is_optimization_request(self, text: str) -> bool:
|
||||
"""Check if request involves optimization."""
|
||||
optimization_keywords = [
|
||||
'optimize', 'optimiz', 'minimize', 'minimiz', 'maximize', 'maximiz',
|
||||
'optuna', 'genetic', 'iteration', 'vary', 'varying'
|
||||
]
|
||||
return any(kw in text for kw in optimization_keywords)
|
||||
|
||||
def _decompose_optimization_workflow(self, request: str, request_lower: str) -> List[WorkflowStep]:
|
||||
"""Decompose an optimization request into workflow steps."""
|
||||
steps = []
|
||||
priority = 1
|
||||
|
||||
# 1. Identify and filter parameters
|
||||
param_filter = self._extract_parameter_filter(request, request_lower)
|
||||
if param_filter:
|
||||
steps.append(WorkflowStep(
|
||||
action='identify_parameters',
|
||||
domain='geometry',
|
||||
params={'filter': param_filter},
|
||||
priority=priority
|
||||
))
|
||||
priority += 1
|
||||
|
||||
# 2. Update parameters (this happens in the optimization loop)
|
||||
steps.append(WorkflowStep(
|
||||
action='update_parameters',
|
||||
domain='geometry',
|
||||
params={'source': 'optimization_algorithm'},
|
||||
priority=priority
|
||||
))
|
||||
priority += 1
|
||||
|
||||
# 3. Run simulation
|
||||
solver = self._extract_solver_type(request_lower)
|
||||
if solver:
|
||||
steps.append(WorkflowStep(
|
||||
action='run_analysis',
|
||||
domain='simulation',
|
||||
params={'solver': solver},
|
||||
priority=priority
|
||||
))
|
||||
priority += 1
|
||||
|
||||
# 4. Extract ALL result types mentioned (multi-objective!)
|
||||
result_extractions = self._extract_all_results(request, request_lower)
|
||||
for result_info in result_extractions:
|
||||
# If result has custom_expression (e.g., mass from .prt expression),
|
||||
# it's a geometry operation, not result_extraction (OP2 file)
|
||||
if 'custom_expression' in result_info:
|
||||
steps.append(WorkflowStep(
|
||||
action='read_expression',
|
||||
domain='geometry',
|
||||
params=result_info,
|
||||
priority=priority
|
||||
))
|
||||
else:
|
||||
steps.append(WorkflowStep(
|
||||
action='extract_result',
|
||||
domain='result_extraction',
|
||||
params=result_info,
|
||||
priority=priority
|
||||
))
|
||||
priority += 1
|
||||
|
||||
# 5. Handle constraints
|
||||
constraints = self._extract_constraints(request, request_lower)
|
||||
if constraints:
|
||||
steps.append(WorkflowStep(
|
||||
action='apply_constraints',
|
||||
domain='optimization',
|
||||
params={'constraints': constraints},
|
||||
priority=priority
|
||||
))
|
||||
priority += 1
|
||||
|
||||
# 6. Optimize (multi-objective if multiple objectives detected)
|
||||
objectives = self._extract_objectives(request, request_lower)
|
||||
algorithm = self._extract_algorithm(request_lower)
|
||||
|
||||
steps.append(WorkflowStep(
|
||||
action='optimize',
|
||||
domain='optimization',
|
||||
params={
|
||||
'objectives': objectives,
|
||||
'algorithm': algorithm,
|
||||
'multi_objective': len(objectives) > 1
|
||||
},
|
||||
priority=priority
|
||||
))
|
||||
|
||||
return steps
|
||||
|
||||
def _decompose_simple_workflow(self, request: str, request_lower: str) -> List[WorkflowStep]:
|
||||
"""Decompose a non-optimization request."""
|
||||
steps = []
|
||||
|
||||
# Check for material creation
|
||||
if 'material' in request_lower and ('create' in request_lower or 'generate' in request_lower):
|
||||
steps.append(WorkflowStep(
|
||||
action='create_material',
|
||||
domain='materials',
|
||||
params={}
|
||||
))
|
||||
|
||||
# Check for simulation run
|
||||
solver = self._extract_solver_type(request_lower)
|
||||
if solver:
|
||||
steps.append(WorkflowStep(
|
||||
action='run_analysis',
|
||||
domain='simulation',
|
||||
params={'solver': solver}
|
||||
))
|
||||
|
||||
# Check for result extraction
|
||||
result_extractions = self._extract_all_results(request, request_lower)
|
||||
for result_info in result_extractions:
|
||||
# If result has custom_expression (e.g., mass from .prt expression),
|
||||
# it's a geometry operation, not result_extraction (OP2 file)
|
||||
if 'custom_expression' in result_info:
|
||||
steps.append(WorkflowStep(
|
||||
action='read_expression',
|
||||
domain='geometry',
|
||||
params=result_info
|
||||
))
|
||||
else:
|
||||
steps.append(WorkflowStep(
|
||||
action='extract_result',
|
||||
domain='result_extraction',
|
||||
params=result_info
|
||||
))
|
||||
|
||||
return steps
|
||||
|
||||
def _extract_parameter_filter(self, request: str, request_lower: str) -> str:
|
||||
"""Extract parameter filter from text."""
|
||||
# Look for specific suffixes/prefixes
|
||||
if '_opt' in request_lower or ' opt ' in request_lower:
|
||||
return '_opt'
|
||||
if 'v_' in request_lower:
|
||||
return 'v_'
|
||||
if '_var' in request_lower:
|
||||
return '_var'
|
||||
if 'design variable' in request_lower or 'design parameter' in request_lower:
|
||||
return 'design_variables'
|
||||
if 'all parameter' in request_lower or 'all expression' in request_lower:
|
||||
return 'all'
|
||||
|
||||
# Default to none if not specified
|
||||
return ''
|
||||
|
||||
def _extract_solver_type(self, text: str) -> str:
|
||||
"""Extract solver type from text."""
|
||||
for keyword, solver in self.solver_types.items():
|
||||
if keyword in text:
|
||||
return solver
|
||||
return ''
|
||||
|
||||
def _extract_all_results(self, request: str, request_lower: str) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Extract ALL result types mentioned in the request.
|
||||
Handles multiple objectives and constraints.
|
||||
"""
|
||||
result_extractions = []
|
||||
|
||||
# Find all result types mentioned
|
||||
found_types = set()
|
||||
for keyword, result_type in self.result_types.items():
|
||||
if keyword in request_lower:
|
||||
found_types.add(result_type)
|
||||
|
||||
# For each result type, extract details
|
||||
for result_type in found_types:
|
||||
result_info = {
|
||||
'result_type': result_type
|
||||
}
|
||||
|
||||
# Extract subcase information
|
||||
subcase = self._extract_subcase(request, request_lower)
|
||||
if subcase:
|
||||
result_info['subcase'] = subcase
|
||||
|
||||
# Extract direction (for reaction forces, displacements)
|
||||
if result_type in ['reaction_force', 'displacement']:
|
||||
direction = self._extract_direction(request, request_lower)
|
||||
if direction:
|
||||
result_info['direction'] = direction
|
||||
|
||||
# Extract metric (min, max, specific location)
|
||||
metric = self._extract_metric_for_type(request, request_lower, result_type)
|
||||
if metric:
|
||||
result_info['metric'] = metric
|
||||
|
||||
# Extract custom expression (for mass, etc.)
|
||||
if result_type == 'mass':
|
||||
custom_expr = self._extract_custom_expression(request, request_lower, 'mass')
|
||||
if custom_expr:
|
||||
result_info['custom_expression'] = custom_expr
|
||||
|
||||
result_extractions.append(result_info)
|
||||
|
||||
return result_extractions
|
||||
|
||||
def _extract_subcase(self, request: str, request_lower: str) -> str:
|
||||
"""Extract subcase information (solution X subcase Y)."""
|
||||
# Look for patterns like "solution 1 subcase 3"
|
||||
match = re.search(r'solution\s+(\d+)\s+subcase\s+(\d+)', request_lower)
|
||||
if match:
|
||||
return f"solution_{match.group(1)}_subcase_{match.group(2)}"
|
||||
|
||||
# Look for just "subcase X"
|
||||
match = re.search(r'subcase\s+(\d+)', request_lower)
|
||||
if match:
|
||||
return f"subcase_{match.group(1)}"
|
||||
|
||||
return ''
|
||||
|
||||
def _extract_direction(self, request: str, request_lower: str) -> str:
|
||||
"""Extract direction (X, Y, Z) for vectorial results."""
|
||||
# Look for explicit direction mentions
|
||||
if re.search(r'\bin\s+[xyz]\b', request_lower):
|
||||
match = re.search(r'in\s+([xyz])\b', request_lower)
|
||||
if match:
|
||||
return match.group(1).upper()
|
||||
|
||||
# Look for "Y direction" pattern
|
||||
if re.search(r'[xyz]\s+direction', request_lower):
|
||||
match = re.search(r'([xyz])\s+direction', request_lower)
|
||||
if match:
|
||||
return match.group(1).upper()
|
||||
|
||||
return ''
|
||||
|
||||
def _extract_metric_for_type(self, request: str, request_lower: str, result_type: str) -> str:
|
||||
"""Extract metric (min, max, average) for specific result type."""
|
||||
# Check for explicit min/max keywords near the result type
|
||||
if 'max' in request_lower or 'maximum' in request_lower:
|
||||
return f'max_{result_type}'
|
||||
if 'min' in request_lower or 'minimum' in request_lower:
|
||||
return f'min_{result_type}'
|
||||
if 'average' in request_lower or 'mean' in request_lower:
|
||||
return f'avg_{result_type}'
|
||||
|
||||
# Default to max for most result types
|
||||
return f'max_{result_type}'
|
||||
|
||||
def _extract_custom_expression(self, request: str, request_lower: str, expr_type: str) -> str:
|
||||
"""Extract custom expression names (e.g., mass_of_only_this_part)."""
|
||||
if expr_type == 'mass':
|
||||
# Look for custom mass expressions
|
||||
match = re.search(r'mass[_\w]*(?:of|for)[_\w]*', request_lower)
|
||||
if match:
|
||||
return match.group(0).replace(' ', '_')
|
||||
|
||||
# Look for explicit expression names
|
||||
if 'expression' in request_lower:
|
||||
match = re.search(r'expression\s+(\w+)', request_lower)
|
||||
if match:
|
||||
return match.group(1)
|
||||
|
||||
return ''
|
||||
|
||||
def _extract_constraints(self, request: str, request_lower: str) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Extract constraints from the request.
|
||||
Examples: "maintain stress under 100 MPa", "keep displacement < 5mm"
|
||||
"""
|
||||
constraints = []
|
||||
|
||||
# Pattern 1: "maintain X under/below Y"
|
||||
maintain_pattern = r'maintain\s+(\w+)\s+(?:under|below|less than|<)\s+([\d.]+)\s*(\w+)?'
|
||||
for match in re.finditer(maintain_pattern, request_lower):
|
||||
result_type = self.result_types.get(match.group(1), match.group(1))
|
||||
value = float(match.group(2))
|
||||
unit = match.group(3) if match.group(3) else ''
|
||||
|
||||
constraints.append({
|
||||
'type': 'upper_bound',
|
||||
'result_type': result_type,
|
||||
'value': value,
|
||||
'unit': unit
|
||||
})
|
||||
|
||||
# Pattern 2: "stress < 100 MPa" or "stress < 100MPa"
|
||||
comparison_pattern = r'(\w+)\s*(<|>|<=|>=)\s*([\d.]+)\s*(\w+)?'
|
||||
for match in re.finditer(comparison_pattern, request_lower):
|
||||
result_type = self.result_types.get(match.group(1), match.group(1))
|
||||
operator = match.group(2)
|
||||
value = float(match.group(3))
|
||||
unit = match.group(4) if match.group(4) else ''
|
||||
|
||||
constraint_type = 'upper_bound' if operator in ['<', '<='] else 'lower_bound'
|
||||
|
||||
constraints.append({
|
||||
'type': constraint_type,
|
||||
'result_type': result_type,
|
||||
'operator': operator,
|
||||
'value': value,
|
||||
'unit': unit
|
||||
})
|
||||
|
||||
return constraints
|
||||
|
||||
def _extract_objectives(self, request: str, request_lower: str) -> List[Dict[str, str]]:
|
||||
"""
|
||||
Extract optimization objectives.
|
||||
Can be multiple for multi-objective optimization.
|
||||
"""
|
||||
objectives = []
|
||||
|
||||
# Find all "minimize X" or "maximize X" patterns
|
||||
minimize_pattern = r'minimi[zs]e\s+(\w+(?:\s+\w+)*?)(?:\s+(?:and|but|with|using|varying|to)|\.|\,|$)'
|
||||
for match in re.finditer(minimize_pattern, request_lower):
|
||||
objective_text = match.group(1).strip()
|
||||
result_type = self._map_to_result_type(objective_text)
|
||||
objectives.append({
|
||||
'type': 'minimize',
|
||||
'target': result_type if result_type else objective_text
|
||||
})
|
||||
|
||||
maximize_pattern = r'maximi[zs]e\s+(\w+(?:\s+\w+)*?)(?:\s+(?:and|but|with|using|varying|to)|\.|\,|$)'
|
||||
for match in re.finditer(maximize_pattern, request_lower):
|
||||
objective_text = match.group(1).strip()
|
||||
result_type = self._map_to_result_type(objective_text)
|
||||
objectives.append({
|
||||
'type': 'maximize',
|
||||
'target': result_type if result_type else objective_text
|
||||
})
|
||||
|
||||
# If no explicit minimize/maximize but mentions optimization
|
||||
if not objectives and ('optimize' in request_lower or 'optim' in request_lower):
|
||||
# Try to infer from context
|
||||
for keyword, result_type in self.result_types.items():
|
||||
if keyword in request_lower:
|
||||
# Assume minimize for stress, strain, displacement
|
||||
# Assume maximize for modal frequencies
|
||||
obj_type = 'maximize' if result_type == 'modal' else 'minimize'
|
||||
objectives.append({
|
||||
'type': obj_type,
|
||||
'target': result_type
|
||||
})
|
||||
|
||||
return objectives if objectives else [{'type': 'minimize', 'target': 'unknown'}]
|
||||
|
||||
def _map_to_result_type(self, text: str) -> str:
|
||||
"""Map objective text to result type."""
|
||||
text_lower = text.lower().strip()
|
||||
for keyword, result_type in self.result_types.items():
|
||||
if keyword in text_lower:
|
||||
return result_type
|
||||
return text # Return as-is if no mapping found
|
||||
|
||||
def _extract_algorithm(self, text: str) -> str:
|
||||
"""Extract optimization algorithm."""
|
||||
if 'optuna' in text:
|
||||
return 'optuna'
|
||||
if 'genetic' in text or 'ga' in text:
|
||||
return 'genetic_algorithm'
|
||||
if 'gradient' in text:
|
||||
return 'gradient_based'
|
||||
if 'pso' in text or 'particle swarm' in text:
|
||||
return 'pso'
|
||||
return 'optuna' # Default
|
||||
|
||||
def get_workflow_summary(self, steps: List[WorkflowStep]) -> str:
|
||||
"""Get human-readable summary of workflow."""
|
||||
if not steps:
|
||||
return "No workflow steps identified"
|
||||
|
||||
lines = ["Workflow Steps Identified:", "=" * 60, ""]
|
||||
|
||||
for i, step in enumerate(steps, 1):
|
||||
lines.append(f"{i}. {step.action.replace('_', ' ').title()}")
|
||||
lines.append(f" Domain: {step.domain}")
|
||||
if step.params:
|
||||
lines.append(f" Parameters:")
|
||||
for key, value in step.params.items():
|
||||
if isinstance(value, list) and value:
|
||||
lines.append(f" {key}:")
|
||||
for item in value[:3]: # Show first 3 items
|
||||
lines.append(f" - {item}")
|
||||
if len(value) > 3:
|
||||
lines.append(f" ... and {len(value) - 3} more")
|
||||
else:
|
||||
lines.append(f" {key}: {value}")
|
||||
lines.append("")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """Test the improved workflow decomposer."""
    wd = WorkflowDecomposer()
    heavy_rule = "=" * 80

    # Test case 1: complex multi-objective optimization with constraints.
    complex_request = """update a geometry (.prt) with all expressions that have a _opt suffix to make the mass minimized. But the mass is not directly the total mass used, its the value under the part expression mass_of_only_this_part which is the calculation of 1of the body mass of my part, the one that I want to minimize.

the objective is to minimize mass but maintain stress of the solution 1 subcase 3 under 100Mpa. And also, as a second objective in my objective function, I want to minimize nodal reaction force in y of the same subcase."""

    print("Test 1: Complex Multi-Objective Optimization with Constraints")
    print(heavy_rule)
    print(f"Request: {complex_request[:100]}...")
    print()

    complex_steps = wd.decompose(complex_request)
    print(wd.get_workflow_summary(complex_steps))

    print("\nDetailed Analysis:")
    print("-" * 80)
    for position, workflow_step in enumerate(complex_steps, 1):
        print(f"{position}. Action: {workflow_step.action}")
        print(f" Domain: {workflow_step.domain}")
        print(f" Params: {workflow_step.params}")
        print()

    # Test case 2: simple single-objective strain optimization.
    simple_request = "minimize strain using SOL101 and optuna varying v_ parameters"

    print("\n" + heavy_rule)
    print("Test 2: Simple Strain Optimization")
    print(heavy_rule)
    print(f"Request: {simple_request}")
    print()

    simple_steps = wd.decompose(simple_request)
    print(wd.get_workflow_summary(simple_steps))


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user