refactor: Major project cleanup and reorganization

## Removed Duplicate Directories
- Deleted old `dashboard/` (replaced by atomizer-dashboard)
- Deleted old `mcp_server/` Python tools (moved model_discovery to optimization_engine)
- Deleted `tests/mcp_server/` (obsolete tests)
- Deleted `launch_dashboard.bat` (old launcher)

## Consolidated Code
- Moved `mcp_server/tools/model_discovery.py` to `optimization_engine/model_discovery/`
- Updated import in `optimization_config_builder.py`
- Deleted stub `extract_mass.py` (use extract_mass_from_bdf instead)
- Deleted unused `intelligent_setup.py` and `hybrid_study_creator.py`
- Archived `result_extractors/` to `archive/deprecated/`

## Documentation Cleanup
- Deleted deprecated `docs/06_PROTOCOLS_DETAILED/` (14 files)
- Archived dated dev docs to `docs/08_ARCHIVE/sessions/`
- Archived old plans to `docs/08_ARCHIVE/plans/`
- Updated `docs/protocols/README.md` with SYS_15

## Skills Consolidation
- Archived redundant study creation skills to `.claude/skills/archive/`
- Kept `core/study-creation-core.md` as canonical

## Housekeeping
- Updated `.gitignore` to prevent committing `nul` and `_dat_run*.dat` artifacts

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Antoine
2025-12-12 11:24:02 -05:00
parent 1bb201e0b7
commit d1261d62fd
58 changed files with 26 additions and 10731 deletions

View File

@@ -1,39 +0,0 @@
"""
Extract total structural mass
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: generic_extraction
Element Type: General
Result Type: unknown
API: model.<result_type>[subcase]
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_generic(op2_file: Path) -> Dict[str, Any]:
    """Load an OP2 results file and return extracted values.

    Stub implementation: the model is read but no actual extraction is
    performed yet, so the result dict always carries ``None``.

    Args:
        op2_file: Path to a Nastran ``.op2`` results file.

    Returns:
        Dict with a single ``'result'`` key (currently always ``None``).
    """
    # OP2 is already imported at module level; the previous local
    # re-import was redundant and has been removed.
    model = OP2()
    model.read_op2(str(op2_file))
    # TODO: Customize extraction based on requirements
    # Available: model.displacements, model.ctetra_stress, etc.
    # Use model.get_op2_stats() to see available results
    return {'result': None}
if __name__ == '__main__':
    # CLI entry point: expects a single .op2 file path argument.
    import sys
    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = extract_generic(op2_file)
        print(f"Extraction result: {result}")
    else:
        # BUG FIX: the usage string was missing its f-prefix, so the
        # literal text "{sys.argv[0]}" was printed instead of the script name.
        print(f"Usage: python {sys.argv[0]} <op2_file>")

View File

@@ -1,914 +0,0 @@
"""
Hybrid Mode Study Creator - Complete Automation
This module provides COMPLETE automation for creating optimization studies:
1. Creates proper study structure (1_setup, 2_results, 3_reports)
2. Runs benchmarking to validate simulation setup
3. Auto-generates runner from workflow JSON
4. Provides progress monitoring
No user intervention required after workflow JSON is created.
"""
from pathlib import Path
from typing import Dict, Any, Optional, List
import json
import shutil
from datetime import datetime
class HybridStudyCreator:
"""
Complete automation for Hybrid Mode study creation.
Usage:
creator = HybridStudyCreator()
study = creator.create_from_workflow(
workflow_json_path="path/to/workflow.json",
model_files={"prt": "path.prt", "sim": "path.sim", "fem": "path.fem"},
study_name="my_optimization"
)
"""
def __init__(self):
    """Cache the project root directory (two levels above this module)."""
    module_dir = Path(__file__).parent
    self.project_root = module_dir.parent
def create_from_workflow(
    self,
    workflow_json_path: Path,
    model_files: Dict[str, Path],
    study_name: str,
    output_parent: Optional[Path] = None
) -> Path:
    """Create a complete, ready-to-run study from a workflow JSON.

    Orchestrates five steps: folder skeleton, model-file copy, workflow
    installation, benchmarking validation, and runner generation (plus a
    configuration report and README along the way).

    Args:
        workflow_json_path: Path to the workflow JSON config.
        model_files: Dict with keys 'prt', 'sim', 'fem' (optionally 'fem_i').
        study_name: Name for the study directory.
        output_parent: Parent directory for studies (default: project_root/studies).

    Returns:
        Path to the created study directory.

    Raises:
        RuntimeError: If benchmarking reports failure.
    """
    banner = "=" * 80
    print(banner)
    print(" HYBRID MODE - AUTOMATED STUDY CREATION")
    print(banner)
    print()

    # Step 1: study skeleton
    print("[1/5] Creating study structure...")
    study_dir = self._create_study_structure(study_name, output_parent)
    print(f" [OK] Study directory: {study_dir.name}")
    print()

    # Step 2: model files
    print("[2/5] Copying model files...")
    model_dest = study_dir / "1_setup/model"
    self._copy_model_files(model_files, model_dest)
    print(f" [OK] Copied {len(model_files)} files")
    print()

    # Step 3: workflow configuration
    print("[3/5] Installing workflow configuration...")
    workflow_dest = study_dir / "1_setup/workflow_config.json"
    shutil.copy2(workflow_json_path, workflow_dest)
    with open(workflow_dest) as f:
        workflow = json.load(f)
    print(f" [OK] Workflow: {workflow.get('study_name', 'unnamed')}")
    print(f" [OK] Variables: {len(workflow.get('design_variables', []))}")
    print(f" [OK] Objectives: {len(workflow.get('objectives', []))}")
    print()

    # Step 4: benchmarking validation (hard failure if it does not pass)
    print("[4/5] Running benchmarking (validating simulation setup)...")
    benchmark_results = self._run_benchmarking(
        model_dest / model_files['prt'].name,
        model_dest / model_files['sim'].name,
        workflow
    )
    if not benchmark_results['success']:
        raise RuntimeError(f"Benchmarking failed: {benchmark_results['error']}")
    print(f" [OK] Simulation validated")
    print(f" [OK] Extracted {benchmark_results['n_results']} results")
    print()

    # Step 4.5: configuration report
    print("[4.5/5] Generating configuration report...")
    self._generate_configuration_report(study_dir, workflow, benchmark_results)
    print(f" [OK] Configuration report: 1_setup/CONFIGURATION_REPORT.md")
    print()

    # Step 5: runner generation
    print("[5/5] Generating optimization runner...")
    runner_path = self._generate_runner(study_dir, workflow, benchmark_results)
    print(f" [OK] Runner: {runner_path.name}")
    print()

    self._create_readme(study_dir, workflow, benchmark_results)

    print(banner)
    print(" STUDY CREATION COMPLETE")
    print(banner)
    print()
    print(f"Study location: {study_dir}")
    print()
    print("To run optimization:")
    print(f" python {runner_path.relative_to(self.project_root)}")
    print()
    return study_dir
def _create_study_structure(self, study_name: str, output_parent: Optional[Path]) -> Path:
    """Create the canonical study folder skeleton and return its root path."""
    parent = output_parent if output_parent is not None else self.project_root / "studies"
    study_dir = parent / study_name
    # Idempotent: existing directories are left untouched.
    for subdir in ("1_setup/model", "2_results", "3_reports"):
        (study_dir / subdir).mkdir(parents=True, exist_ok=True)
    return study_dir
def _copy_model_files(self, model_files: Dict[str, Path], dest_dir: Path):
    """Copy every existing model file into *dest_dir*; missing/None entries are skipped."""
    for source_path in model_files.values():
        if source_path and source_path.exists():
            shutil.copy2(source_path, dest_dir / source_path.name)
def _run_benchmarking(
    self,
    prt_file: Path,
    sim_file: Path,
    workflow: Dict[str, Any]
) -> Dict[str, Any]:
    """Run INTELLIGENT benchmarking to validate the simulation setup.

    Delegates to IntelligentSetup, which solves all solutions in the .sim
    file, discovers the available results, matches workflow objectives to
    them and picks the optimal solution for optimization. Baseline values
    for each objective are then extracted from the discovered OP2 files.

    Returns:
        On success: dict with keys 'success' (True), 'n_results',
        'results', 'solution_name' and 'benchmark_data'.
        On failure: dict with 'success' (False) and 'error'.
    """
    from optimization_engine.intelligent_setup import IntelligentSetup

    try:
        print(" Running INTELLIGENT benchmarking...")
        print(" - Solving ALL solutions in .sim file")
        print(" - Discovering all available results")
        print(" - Matching objectives to results")
        print()

        benchmark_data = IntelligentSetup().run_complete_benchmarking(
            prt_file, sim_file, workflow
        )
        if not benchmark_data['success']:
            return {
                'success': False,
                'error': f"Intelligent benchmarking failed: {benchmark_data.get('error', 'Unknown')}"
            }

        print(f" [OK] Expressions found: {len(benchmark_data.get('expressions', {}))}")
        print(f" [OK] Solutions found: {len(benchmark_data.get('solutions', {}))}")
        print(f" [OK] Results discovered: {len(benchmark_data.get('available_results', {}))}")

        obj_mapping = benchmark_data.get('objective_mapping', {})
        if 'objectives' in obj_mapping:
            print(f" [OK] Objectives matched: {len(obj_mapping['objectives'])}")
            for obj_name, obj_info in obj_mapping['objectives'].items():
                solution = obj_info.get('solution', 'Unknown')
                result_type = obj_info.get('result_type', 'Unknown')
                confidence = obj_info.get('match_confidence', 'Unknown')
                print(f" - {obj_name}: {result_type} from '{solution}' ({confidence} confidence)")

        recommended_solution = obj_mapping.get('primary_solution')
        if recommended_solution:
            print(f" [OK] Recommended solution: {recommended_solution}")

        # Baseline extraction — the discovered-results lookup is the same
        # for every branch, so it is hoisted out of the loop.
        available_results = benchmark_data.get('available_results', {})
        extracted = {}
        for obj in workflow.get('objectives', []):
            action = obj.get('extraction', {}).get('action', '').lower()
            if 'frequency' in action or 'eigenvalue' in action:
                if 'eigenvalues' in available_results:
                    op2_file = Path(available_results['eigenvalues']['op2_path'])
                    freq = self._extract_frequency(op2_file, mode_number=1)
                    extracted['first_frequency'] = freq
                    print(f" Baseline first frequency: {freq:.4f} Hz")
            elif 'displacement' in action:
                if 'displacements' in available_results:
                    op2_file = Path(available_results['displacements']['op2_path'])
                    disp = self._extract_displacement(op2_file)
                    extracted['max_displacement'] = disp
                    print(f" Baseline max displacement: {disp:.6f} mm")
            elif 'stress' in action:
                if 'stresses' in available_results:
                    op2_file = Path(available_results['stresses']['op2_path'])
                    stress = self._extract_stress(op2_file)
                    extracted['max_stress'] = stress
                    print(f" Baseline max stress: {stress:.2f} MPa")

        return {
            'success': True,
            'n_results': len(extracted),
            'results': extracted,
            'solution_name': recommended_solution,
            'benchmark_data': benchmark_data  # Include full benchmarking data
        }
    except Exception as e:
        return {
            'success': False,
            'error': str(e)
        }
def _extract_frequency(self, op2_file: Path, mode_number: int = 1) -> float:
    """Return the eigenfrequency (Hz) of *mode_number* from an OP2 file.

    Raises:
        ValueError: If the file contains no eigenvalue results.
    """
    from pyNastran.op2.op2 import OP2
    import numpy as np

    model = OP2()
    model.read_op2(str(op2_file))
    if not hasattr(model, 'eigenvalues') or len(model.eigenvalues) == 0:
        raise ValueError("No eigenvalues found in OP2 file")
    first_subcase = next(iter(model.eigenvalues))
    eigenvalue = model.eigenvalues[first_subcase].eigenvalues[mode_number - 1]
    # lambda = omega^2, f = omega / (2*pi)
    return float(np.sqrt(eigenvalue) / (2 * np.pi))
def _extract_displacement(self, op2_file: Path) -> float:
    """Return the maximum nodal displacement magnitude from an OP2 file.

    Raises:
        ValueError: If the file contains no displacement results.
    """
    from pyNastran.op2.op2 import OP2
    import numpy as np

    model = OP2()
    model.read_op2(str(op2_file))
    if not hasattr(model, 'displacements') or len(model.displacements) == 0:
        raise ValueError("No displacements found in OP2 file")
    first_subcase = next(iter(model.displacements))
    # data layout: [time, node, components] — first three columns are tx/ty/tz
    translations = model.displacements[first_subcase].data[0, :, :3]
    magnitudes = np.linalg.norm(translations, axis=1)
    return float(magnitudes.max())
def _extract_stress(self, op2_file: Path) -> float:
    """Return the maximum von Mises stress from an OP2 file.

    NOTE(review): only CQUAD4 shell stresses are inspected despite the
    original "try different stress result locations" comment — other
    element types (solids, other shells) are not handled here.

    Raises:
        ValueError: If no CQUAD4 stress results are present.
    """
    from pyNastran.op2.op2 import OP2
    import numpy as np

    model = OP2()
    model.read_op2(str(op2_file))
    if hasattr(model, 'cquad4_stress') and len(model.cquad4_stress) > 0:
        first_subcase = next(iter(model.cquad4_stress))
        # von Mises is assumed at column index 7 — TODO confirm data layout
        von_mises = model.cquad4_stress[first_subcase].data[0, :, 7]
        return float(np.max(von_mises))
    raise ValueError("No stress results found in OP2 file")
def _generate_runner(
self,
study_dir: Path,
workflow: Dict[str, Any],
benchmark_results: Dict[str, Any]
) -> Path:
"""Write run_optimization.py into the study directory and return its path.

The script is assembled from an f-string template plus an extractor
function emitted by _generate_extractor_code(). The generated runner
loads the workflow JSON, drives NXParameterUpdater/NXSolver for each
Optuna trial, appends every trial to an incremental JSON history file,
and finally renders a markdown report into 3_reports/.

Args:
study_dir: Root directory of the study being created.
workflow: Parsed workflow configuration (design variables, objectives).
benchmark_results: Output of _run_benchmarking; its 'solution_name'
is baked into the generated solver call.

Returns:
Path to the generated run_optimization.py.
"""
runner_path = study_dir / "run_optimization.py"
# Detect result types from workflow
# NOTE(review): `extracts_frequency` is computed but never referenced
# below — confirm it is dead code before removing.
extracts_frequency = any(
'frequency' in obj.get('extraction', {}).get('action', '').lower()
for obj in workflow.get('objectives', [])
)
# Generate extractor function based on workflow
extractor_code = self._generate_extractor_code(workflow)
# Everything from here to the closing ''' is the literal template of the
# generated script; doubled braces {{ }} survive as single braces in the
# output, single braces are interpolated now.
runner_code = f'''"""
Auto-generated optimization runner
Created: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
"""
import sys
from pathlib import Path
# Add project root to path
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))
import json
import optuna
from optimization_engine.nx_updater import NXParameterUpdater
from optimization_engine.nx_solver import NXSolver
{extractor_code}
def main():
print("="*80)
print(" {workflow.get('study_name', 'OPTIMIZATION').upper()}")
print("="*80)
print()
# Load workflow
config_file = Path(__file__).parent / "1_setup/workflow_config.json"
with open(config_file) as f:
workflow = json.load(f)
print("Workflow loaded:")
print(f" Request: {workflow.get('optimization_request', 'N/A')}")
print(f" Variables: {len(workflow.get('design_variables', []))}")
print()
# Setup paths
prt_file = Path(__file__).parent / "1_setup/model" / [f for f in (Path(__file__).parent / "1_setup/model").glob("*.prt")][0].name
sim_file = Path(__file__).parent / "1_setup/model" / [f for f in (Path(__file__).parent / "1_setup/model").glob("*.sim")][0].name
output_dir = Path(__file__).parent / "2_results"
reports_dir = Path(__file__).parent / "3_reports"
output_dir.mkdir(parents=True, exist_ok=True)
reports_dir.mkdir(parents=True, exist_ok=True)
# Initialize
updater = NXParameterUpdater(prt_file)
solver = NXSolver()
# Create Optuna study
study_name = "{workflow.get('study_name', 'optimization')}"
storage = f"sqlite:///{{output_dir / 'study.db'}}"
study = optuna.create_study(
study_name=study_name,
storage=storage,
load_if_exists=True,
direction="minimize"
)
# Initialize incremental history
history_file = output_dir / 'optimization_history_incremental.json'
history = []
if history_file.exists():
with open(history_file) as f:
history = json.load(f)
def objective(trial):
# Sample design variables
params = {{}}
for var in workflow['design_variables']:
name = var['parameter']
bounds = var['bounds']
params[name] = trial.suggest_float(name, bounds[0], bounds[1])
print(f"\\nTrial {{trial.number}}:")
for name, value in params.items():
print(f" {{name}} = {{value:.2f}}")
# Update model
updater.update_expressions(params)
# Run simulation with the optimal solution
result = solver.run_simulation(sim_file, solution_name="{benchmark_results.get('solution_name')}")
if not result['success']:
raise RuntimeError(f"Simulation failed: {{result.get('errors', 'Unknown')}}")
op2_file = result['op2_file']
# Extract results and calculate objective
results = extract_results(op2_file, workflow)
# Print results
for name, value in results.items():
print(f" {{name}} = {{value:.4f}}")
# Calculate objective (from first objective in workflow)
obj_config = workflow['objectives'][0]
result_name = list(results.keys())[0]
# For target-matching objectives, compute error from target
if 'target_frequency' in obj_config.get('extraction', {{}}).get('params', {{}}):
target = obj_config['extraction']['params']['target_frequency']
objective_value = abs(results[result_name] - target)
print(f" Frequency: {{results[result_name]:.4f}} Hz, Target: {{target}} Hz, Error: {{objective_value:.4f}} Hz")
elif obj_config['goal'] == 'minimize':
objective_value = results[result_name]
else:
objective_value = -results[result_name]
print(f" Objective = {{objective_value:.4f}}")
# Save to incremental history
trial_record = {{
'trial_number': trial.number,
'design_variables': params,
'results': results,
'objective': objective_value
}}
history.append(trial_record)
with open(history_file, 'w') as f:
json.dump(history, f, indent=2)
return objective_value
# Run optimization
n_trials = 10
print(f"\\nRunning {{n_trials}} trials...")
print("="*80)
print()
study.optimize(objective, n_trials=n_trials)
# Results
print()
print("="*80)
print(" OPTIMIZATION COMPLETE")
print("="*80)
print()
print(f"Best trial: #{{study.best_trial.number}}")
for name, value in study.best_params.items():
print(f" {{name}} = {{value:.2f}}")
print(f"\\nBest objective = {{study.best_value:.4f}}")
print()
# Generate human-readable markdown report with graphs
print("Generating optimization report...")
from optimization_engine.generate_report_markdown import generate_markdown_report
# Extract target frequency from workflow objectives
target_value = None
tolerance = 0.1
for obj in workflow.get('objectives', []):
if 'target_frequency' in obj.get('extraction', {{}}).get('params', {{}}):
target_value = obj['extraction']['params']['target_frequency']
break
# Generate markdown report with graphs
report = generate_markdown_report(history_file, target_value=target_value, tolerance=tolerance)
report_file = reports_dir / 'OPTIMIZATION_REPORT.md'
with open(report_file, 'w', encoding='utf-8') as f:
f.write(report)
print(f"✓ Markdown report with graphs saved to: {{report_file}}")
print()
if __name__ == "__main__":
main()
'''
# Write the runner next to 1_setup/, 2_results/ and 3_reports/.
with open(runner_path, 'w', encoding='utf-8') as f:
f.write(runner_code)
return runner_path
def _generate_extractor_code(self, workflow: Dict[str, Any]) -> str:
"""Return the source of an extract_results() function tailored to the workflow.

Scans each objective's extraction 'action' string and includes only the
result-extraction snippets (frequency / displacement / stress) that the
workflow actually needs. The returned string is spliced verbatim into
the generated runner by _generate_runner().
"""
# Detect what needs to be extracted
needs_frequency = False
needs_displacement = False
needs_stress = False
for obj in workflow.get('objectives', []):
action = obj.get('extraction', {}).get('action', '').lower()
if 'frequency' in action or 'eigenvalue' in action:
needs_frequency = True
elif 'displacement' in action:
needs_displacement = True
elif 'stress' in action:
needs_stress = True
# Common prologue: open the OP2 once, then append per-result snippets.
code = '''
def extract_results(op2_file, workflow):
"""Extract results from OP2 file based on workflow objectives."""
from pyNastran.op2.op2 import OP2
import numpy as np
model = OP2()
model.read_op2(str(op2_file))
results = {}
'''
if needs_frequency:
# Frequency is mandatory when requested: raises if eigenvalues missing.
code += '''
# Extract first frequency
if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
subcase = list(model.eigenvalues.keys())[0]
eig_obj = model.eigenvalues[subcase]
eigenvalue = eig_obj.eigenvalues[0]
angular_freq = np.sqrt(eigenvalue)
frequency_hz = angular_freq / (2 * np.pi)
results['first_frequency'] = float(frequency_hz)
else:
raise ValueError("No eigenvalues found in OP2 file")
'''
if needs_displacement:
# Displacement/stress snippets are best-effort: silently skipped if absent.
code += '''
# Extract max displacement
if hasattr(model, 'displacements') and len(model.displacements) > 0:
subcase = list(model.displacements.keys())[0]
disp_obj = model.displacements[subcase]
translations = disp_obj.data[0, :, :3]
magnitudes = np.linalg.norm(translations, axis=1)
results['max_displacement'] = float(np.max(magnitudes))
'''
if needs_stress:
code += '''
# Extract max stress
if hasattr(model, 'cquad4_stress') and len(model.cquad4_stress) > 0:
subcase = list(model.cquad4_stress.keys())[0]
stress_obj = model.cquad4_stress[subcase]
von_mises = stress_obj.data[0, :, 7]
results['max_stress'] = float(np.max(von_mises))
'''
code += '''
return results
'''
return code
def _generate_configuration_report(
self,
study_dir: Path,
workflow: Dict[str, Any],
benchmark_results: Dict[str, Any]
):
"""
Generate a comprehensive configuration report with ALL setup details.

This creates 1_setup/CONFIGURATION_REPORT.md with:
- User's optimization request
- All discovered expressions
- All discovered solutions
- All available result types
- Objective matching details
- Baseline values
- Warnings and issues

Args:
study_dir: Root of the study; the report goes into its 1_setup/ folder.
workflow: Parsed workflow configuration.
benchmark_results: Output of _run_benchmarking; its 'benchmark_data'
entry supplies the discovered catalog rendered below.
"""
report_path = study_dir / "1_setup" / "CONFIGURATION_REPORT.md"
# Get benchmark data (all .get() with defaults: report degrades gracefully
# when benchmarking produced partial data)
benchmark_data = benchmark_results.get('benchmark_data', {})
expressions = benchmark_data.get('expressions', {})
solutions = benchmark_data.get('solutions', {})
available_results = benchmark_data.get('available_results', {})
obj_mapping = benchmark_data.get('objective_mapping', {})
# Build expressions section
expressions_md = "## Model Expressions\n\n"
if expressions:
expressions_md += f"**Total expressions found: {len(expressions)}**\n\n"
expressions_md += "| Expression Name | Current Value | Units | Formula |\n"
expressions_md += "|----------------|---------------|-------|----------|\n"
for name, info in sorted(expressions.items()):
value = info.get('value', 'N/A')
units = info.get('units', '')
formula = info.get('formula', '')
expressions_md += f"| {name} | {value} | {units} | {formula} |\n"
else:
expressions_md += "*No expressions found in model*\n"
# Build solutions section
solutions_md = "## Simulation Solutions\n\n"
if solutions:
# Handle both old format (solution_names list) and new format (dict)
if isinstance(solutions, dict):
if 'solution_names' in solutions:
# Old format: just solution names
solution_names = solutions.get('solution_names', [])
num_solved = solutions.get('num_solved', 0)
num_failed = solutions.get('num_failed', 0)
num_skipped = solutions.get('num_skipped', 0)
solutions_md += f"**Solutions discovered**: {len(solution_names)}\n"
solutions_md += f"**Solved**: {num_solved} | **Failed**: {num_failed} | **Skipped**: {num_skipped}\n\n"
if solution_names:
for sol_name in solution_names:
solutions_md += f"- {sol_name}\n"
else:
solutions_md += "*No solution names retrieved*\n"
else:
# New format: dict of solution details
solutions_md += f"**Total solutions found: {len(solutions)}**\n\n"
for sol_name, sol_info in solutions.items():
solutions_md += f"### {sol_name}\n\n"
solutions_md += f"- **Type**: {sol_info.get('type', 'Unknown')}\n"
solutions_md += f"- **OP2 File**: `{sol_info.get('op2_path', 'N/A')}`\n\n"
else:
solutions_md += "*No solutions discovered - check if benchmarking solved all solutions*\n"
# Build available results section
results_md = "## Available Results\n\n"
if available_results:
results_md += f"**Total result types discovered: {len(available_results)}**\n\n"
for result_type, result_info in available_results.items():
results_md += f"### {result_type}\n\n"
results_md += f"- **Solution**: {result_info.get('solution', 'Unknown')}\n"
results_md += f"- **OP2 File**: `{result_info.get('op2_path', 'N/A')}`\n"
if 'sample_value' in result_info:
results_md += f"- **Sample Value**: {result_info['sample_value']}\n"
results_md += "\n"
else:
results_md += "*No results discovered - check if simulations solved successfully*\n"
# Build objective matching section
matching_md = "## Objective Matching\n\n"
if 'objectives' in obj_mapping and obj_mapping['objectives']:
matching_md += f"**Objectives matched: {len(obj_mapping['objectives'])}**\n\n"
for obj_name, obj_info in obj_mapping['objectives'].items():
solution = obj_info.get('solution', 'NONE')
result_type = obj_info.get('result_type', 'Unknown')
confidence = obj_info.get('match_confidence', 'Unknown')
extractor = obj_info.get('extractor', 'Unknown')
op2_file = obj_info.get('op2_file', 'N/A')
error = obj_info.get('error', None)
matching_md += f"### {obj_name}\n\n"
matching_md += f"- **Result Type**: {result_type}\n"
matching_md += f"- **Solution**: {solution}\n"
matching_md += f"- **Confidence**: {confidence}\n"
matching_md += f"- **Extractor**: `{extractor}`\n"
matching_md += f"- **OP2 File**: `{op2_file}`\n"
if error:
matching_md += f"- **⚠️ ERROR**: {error}\n"
matching_md += "\n"
# Add primary solution
primary_solution = obj_mapping.get('primary_solution')
if primary_solution:
matching_md += f"**Primary Solution Selected**: `{primary_solution}`\n\n"
matching_md += "This solution will be used for optimization.\n\n"
else:
matching_md += "*No objectives matched - check workflow configuration*\n"
# Build baseline values section
baseline_md = "## Baseline Values\n\n"
baseline_results = benchmark_results.get('results', {})
if baseline_results:
baseline_md += "Values extracted from the initial (unoptimized) model:\n\n"
for key, value in baseline_results.items():
baseline_md += f"- **{key}**: {value}\n"
else:
baseline_md += "*No baseline values extracted*\n"
# Build warnings section
warnings_md = "## Warnings and Issues\n\n"
warnings = []
# Check for missing eigenvalues
for obj_name, obj_info in obj_mapping.get('objectives', {}).items():
if obj_info.get('error'):
warnings.append(f"- ⚠️ **{obj_name}**: {obj_info['error']}")
# Check for no solutions
if not solutions:
warnings.append("- ⚠️ **No solutions discovered**: Benchmarking may not have solved all solutions")
# Check for no results
if not available_results:
warnings.append("- ⚠️ **No results available**: Check if simulations ran successfully")
if warnings:
warnings_md += "\n".join(warnings) + "\n"
else:
warnings_md += "✅ No issues detected!\n"
# Build full report: header + design-variable table...
content = f'''# Configuration Report
**Study**: {workflow.get('study_name', study_dir.name)}
**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
---
## Optimization Request
**User's Goal**:
> {workflow.get('optimization_request', '*No description provided*')}
**Design Variables**: {len(workflow.get('design_variables', []))}
| Variable | Min | Max |
|----------|-----|-----|
'''
for var in workflow.get('design_variables', []):
param = var.get('parameter', 'Unknown')
bounds = var.get('bounds', [0, 0])
content += f"| {param} | {bounds[0]} | {bounds[1]} |\n"
content += f'''
**Objectives**: {len(workflow.get('objectives', []))}
| Objective | Goal |
|-----------|------|
'''
for obj in workflow.get('objectives', []):
obj_name = obj.get('name', 'Unknown')
goal = obj.get('goal', 'Unknown')
content += f"| {obj_name} | {goal} |\n"
# ...then all the prebuilt sections stitched together.
content += f'''
---
{expressions_md}
---
{solutions_md}
---
{results_md}
---
{matching_md}
---
{baseline_md}
---
{warnings_md}
---
## Next Steps
1. ✅ Study structure created
2. ✅ Benchmarking complete
3. ✅ Configuration validated
4. ➡️ **Run optimization**: `python run_optimization.py`
---
*This report was auto-generated by the Intelligent Setup System*
'''
with open(report_path, 'w', encoding='utf-8') as f:
f.write(content)
def _create_readme(
self,
study_dir: Path,
workflow: Dict[str, Any],
benchmark_results: Dict[str, Any]
):
"""Render the study-level README.md summarizing setup and how to run.

Args:
study_dir: Root of the study; README.md is written there.
workflow: Parsed workflow configuration.
benchmark_results: Output of _run_benchmarking (baseline values are
listed only when it succeeded).
"""
readme_path = study_dir / "README.md"
# Format design variables
vars_md = ""
for var in workflow.get('design_variables', []):
bounds = var.get('bounds', [0, 1])
desc = var.get('description', '')
vars_md += f"- `{var['parameter']}`: {bounds[0]}-{bounds[1]} mm"
if desc:
vars_md += f" - {desc}"
vars_md += "\n"
# Format objectives
objs_md = ""
for obj in workflow.get('objectives', []):
objs_md += f"- {obj['goal'].title()} {obj['name']}\n"
# Format benchmark results
bench_md = ""
if benchmark_results.get('success'):
for name, value in benchmark_results.get('results', {}).items():
bench_md += f"- {name}: {value:.4f}\n"
# NOTE(review): the structure diagram below says `2_substudies/` but
# _create_study_structure actually creates `2_results/` — confirm which
# layout is canonical before relying on these paths.
content = f'''# {workflow.get('study_name', 'Optimization Study')}
**Created**: {datetime.now().strftime("%Y-%m-%d")}
**Mode**: Hybrid (Workflow JSON + Auto-generated runner)
## Problem Description
{workflow.get('optimization_request', 'N/A')}
### Design Variables
{vars_md}
### Objectives
{objs_md}
## Benchmark Results
Baseline simulation (default geometry):
{bench_md}
## Study Structure
```
{study_dir.name}/
├── 1_setup/
│ ├── model/ # FEM model files
│ └── workflow_config.json # Optimization specification
├── 2_substudies/
│ └── results/ # Optimization results
├── 3_reports/
├── run_optimization.py # Auto-generated runner
└── README.md # This file
```
## Running the Optimization
```bash
python run_optimization.py
```
This will:
1. Load workflow configuration
2. Initialize NX model updater and solver
3. Run {10} optimization trials
4. Save results to `2_substudies/results/`
## Results
After optimization completes, check:
- `2_substudies/results/study.db` - Optuna database
- `2_substudies/results/` - Best design parameters
---
**Created by Hybrid Mode** - 90% automation, production ready!
'''
with open(readme_path, 'w', encoding='utf-8') as f:
f.write(content)
if __name__ == "__main__":
# Example usage
creator = HybridStudyCreator()
# Example: Create study from workflow JSON
study_dir = creator.create_from_workflow(
workflow_json_path=Path("path/to/workflow.json"),
model_files={
'prt': Path("path/to/model.prt"),
'sim': Path("path/to/model.sim"),
'fem': Path("path/to/model.fem")
},
study_name="example_study"
)
print(f"Study created: {study_dir}")

View File

@@ -1,694 +0,0 @@
"""
Intelligent Setup System for Atomizer
This module provides COMPLETE autonomy for optimization setup:
1. Solves ALL solutions in .sim file
2. Discovers all available results (eigenvalues, displacements, stresses, etc.)
3. Catalogs expressions and parameters
4. Matches workflow objectives to available results
5. Auto-selects correct solution for optimization
6. Generates optimized runner code
This is the level of intelligence Atomizer should have.
"""
from pathlib import Path
from typing import Dict, Any, List, Optional, Tuple
import json
from datetime import datetime
class IntelligentSetup:
"""
Intelligent benchmarking and setup system.
Proactively discovers EVERYTHING about a simulation:
- All solutions (Static, Modal, Buckling, etc.)
- All result types (displacements, stresses, eigenvalues, etc.)
- All expressions and parameters
- Matches user objectives to available data
"""
def __init__(self):
    """Cache the project root directory (two levels above this module)."""
    module_dir = Path(__file__).parent
    self.project_root = module_dir.parent
def run_complete_benchmarking(
    self,
    prt_file: Path,
    sim_file: Path,
    workflow: Dict[str, Any]
) -> Dict[str, Any]:
    """Run the full four-phase discovery pass and return its catalog.

    Phases: (1) extract every expression from the .prt, (2) solve every
    solution in the .sim, (3) analyze all produced result files, and
    (4) match the workflow objectives to the discovered results.

    Returns:
        Dict with keys 'success', 'expressions', 'solutions',
        'available_results', 'objective_mapping', 'recommended_solution'
        and 'errors'. Any exception is caught: 'success' stays False and
        the message is appended to 'errors'.
    """
    banner = "=" * 80
    print()
    print(banner)
    print(" INTELLIGENT SETUP - COMPLETE ANALYSIS")
    print(banner)
    print()

    results: Dict[str, Any] = {
        'success': False,
        'expressions': {},
        'solutions': {},
        'available_results': {},
        'objective_mapping': {},
        'recommended_solution': None,
        'errors': []
    }
    try:
        # Phase 1: expression catalog (preview first 5 on the console)
        print("[Phase 1/4] Extracting ALL expressions from model...")
        expressions = self._extract_all_expressions(prt_file)
        results['expressions'] = expressions
        print(f" [OK] Found {len(expressions)} expressions")
        for name, info in list(expressions.items())[:5]:
            val = info.get('value', 'N/A')
            units = info.get('units', '')
            print(f" - {name}: {val} {units}")
        if len(expressions) > 5:
            print(f" ... and {len(expressions) - 5} more")
        print()

        # Phase 2: solve every solution in the .sim
        print("[Phase 2/4] Solving ALL solutions in .sim file...")
        solutions_info = self._solve_all_solutions(sim_file)
        results['solutions'] = solutions_info
        print(f" [OK] Solved {solutions_info['num_solved']} solutions")
        for sol_name in solutions_info['solution_names']:
            print(f" - {sol_name}")
        print()

        # Phase 3: catalog every result file produced next to the .sim
        print("[Phase 3/4] Analyzing ALL result files...")
        available_results = self._analyze_all_results(sim_file.parent, solutions_info)
        results['available_results'] = available_results
        print(f" [OK] Found {len(available_results)} result files")
        for result_type, details in available_results.items():
            print(f" - {result_type}: {details['count']} entries in {details['file']}")
        print()

        # Phase 4: map workflow objectives onto the discovered catalog
        print("[Phase 4/4] Matching objectives to available results...")
        mapping = self._match_objectives_to_results(workflow, available_results, solutions_info)
        results['objective_mapping'] = mapping
        results['recommended_solution'] = mapping.get('primary_solution')
        print(f" [OK] Objective mapping complete")
        for obj_name, obj_info in mapping['objectives'].items():
            print(f" - {obj_name}")
            print(f" Solution: {obj_info.get('solution', 'NONE')}")
            print(f" Result type: {obj_info.get('result_type', 'Unknown')}")
            print(f" Extractor: {obj_info.get('extractor', 'Unknown')}")
            if 'error' in obj_info:
                print(f" [WARNING] {obj_info['error']}")
        print()
        if mapping.get('primary_solution'):
            print(f" [RECOMMENDATION] Use solution: {mapping['primary_solution']}")
            print()
        results['success'] = True
    except Exception as e:
        # Record but do not raise: caller inspects results['success'].
        results['errors'].append(str(e))
        print(f" [ERROR] {e}")

    print()
    print(banner)
    print(" ANALYSIS COMPLETE")
    print(banner)
    print()
    return results
def _extract_all_expressions(self, prt_file: Path) -> Dict[str, Any]:
    """Return every expression defined in the given .prt file.

    The actual .prt parsing is owned by NXParameterUpdater; this method
    only delegates to it.
    """
    from optimization_engine.nx_updater import NXParameterUpdater
    return NXParameterUpdater(prt_file).get_all_expressions()
def _solve_all_solutions(self, sim_file: Path) -> Dict[str, Any]:
    """
    Solve ALL solutions in .sim file using NXOpen journal approach.

    CRITICAL: This method updates the .fem file from the .prt before solving!
    This is required when geometry changes (modal analysis, etc.)

    A temporary NXOpen journal is written next to the .sim file, executed
    via the NX run_journal binary, then removed — even when the solve
    raises (e.g. subprocess timeout).

    Args:
        sim_file: Path to the .sim file whose solutions should be solved.

    Returns dict with:
    - num_solved: int
    - num_failed: int
    - num_skipped: int
    - solution_names: List[str]
    """
    # Create journal to solve all solutions.
    # NOTE: doubled braces below become literal braces in the generated journal.
    journal_code = f'''
import sys
import NXOpen
import NXOpen.CAE

def main(args):
    if len(args) < 1:
        print("ERROR: No .sim file path provided")
        return False

    sim_file_path = args[0]
    theSession = NXOpen.Session.GetSession()

    # Open the .sim file
    print(f"[JOURNAL] Opening simulation: {{sim_file_path}}")
    basePart1, partLoadStatus1 = theSession.Parts.OpenActiveDisplay(
        sim_file_path,
        NXOpen.DisplayPartOption.AllowAdditional
    )
    partLoadStatus1.Dispose()

    workSimPart = theSession.Parts.BaseWork
    print(f"[JOURNAL] Simulation opened successfully")

    # CRITICAL: Update FEM from master model (.prt)
    # This is required when geometry has changed (modal analysis, etc.)
    print("[JOURNAL] Updating FEM from master model...")
    simSimulation = workSimPart.Simulation

    # Get all FEModels and update them
    femModels = simSimulation.FemParts
    for i in range(femModels.Length):
        femPart = femModels.Item(i)
        print(f"[JOURNAL] Updating FEM: {{femPart.Name}}")
        # Update the FEM from associated CAD part
        femPart.UpdateFemodel()

    # Save after FEM update
    print("[JOURNAL] Saving after FEM update...")
    partSaveStatus = workSimPart.Save(
        NXOpen.BasePart.SaveComponents.TrueValue,
        NXOpen.BasePart.CloseAfterSave.FalseValue
    )
    partSaveStatus.Dispose()

    # Get all solutions
    theCAESimSolveManager = NXOpen.CAE.SimSolveManager.GetSimSolveManager(theSession)

    # Solve all solutions
    print("[JOURNAL] Solving ALL solutions...")
    num_solved, num_failed, num_skipped = theCAESimSolveManager.SolveAllSolutions(
        NXOpen.CAE.SimSolution.SolveOption.Solve,
        NXOpen.CAE.SimSolution.SetupCheckOption.CompleteCheckAndOutputErrors,
        NXOpen.CAE.SimSolution.SolveMode.Foreground,
        False
    )

    # Get solution names
    simSimulation = workSimPart.FindObject("Simulation")
    solutions = []
    for obj in simSimulation.GetAllDescendents():
        if "Solution[" in str(obj):
            solutions.append(str(obj))

    # Save to write output files
    print("[JOURNAL] Saving simulation to write output files...")
    partSaveStatus = workSimPart.Save(
        NXOpen.BasePart.SaveComponents.TrueValue,
        NXOpen.BasePart.CloseAfterSave.FalseValue
    )
    partSaveStatus.Dispose()

    # Output results
    print(f"ATOMIZER_SOLUTIONS_SOLVED: {{num_solved}}")
    print(f"ATOMIZER_SOLUTIONS_FAILED: {{num_failed}}")
    print(f"ATOMIZER_SOLUTIONS_SKIPPED: {{num_skipped}}")
    for sol in solutions:
        print(f"ATOMIZER_SOLUTION: {{sol}}")

    return True

if __name__ == '__main__':
    success = main(sys.argv[1:])
    sys.exit(0 if success else 1)
'''

    # Write and execute journal
    journal_path = sim_file.parent / "_solve_all_solutions.py"
    with open(journal_path, 'w') as f:
        f.write(journal_code)

    # Run journal via NX.
    # FIX: removed an NXSolver instance that was created here but never used.
    import subprocess
    from config import NX_RUN_JOURNAL

    num_solved = 0
    num_failed = 0
    num_skipped = 0
    solution_names = []

    try:
        result = subprocess.run(
            [str(NX_RUN_JOURNAL), str(journal_path), str(sim_file)],
            capture_output=True,
            text=True,
            timeout=600
        )

        # Parse the ATOMIZER_* marker lines emitted by the journal
        for line in result.stdout.split('\n'):
            if 'ATOMIZER_SOLUTIONS_SOLVED:' in line:
                num_solved = int(line.split(':')[1].strip())
            elif 'ATOMIZER_SOLUTIONS_FAILED:' in line:
                num_failed = int(line.split(':')[1].strip())
            elif 'ATOMIZER_SOLUTIONS_SKIPPED:' in line:
                num_skipped = int(line.split(':')[1].strip())
            elif 'ATOMIZER_SOLUTION:' in line:
                sol_name = line.split(':', 1)[1].strip()
                solution_names.append(sol_name)
    finally:
        # FIX: clean up the temporary journal even if the solve raises
        # (e.g. subprocess.TimeoutExpired); previously the file leaked.
        journal_path.unlink(missing_ok=True)

    return {
        'num_solved': num_solved,
        'num_failed': num_failed,
        'num_skipped': num_skipped,
        'solution_names': solution_names
    }
def _analyze_all_results(
    self,
    model_dir: Path,
    solutions_info: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Analyze ALL .op2 files to discover available results.

    Scans model_dir (non-recursively) for *.op2 files and probes each one
    with pyNastran for eigenvalues, displacements, shell stresses and shell
    forces. Only the first subcase of each result container is inspected,
    and if several files expose the same result type the last file scanned
    wins (earlier entries are overwritten).

    Args:
        model_dir: Directory containing the solver output files.
        solutions_info: Output of _solve_all_solutions(). Currently unused
            here; the owning solution is guessed from each file name instead.

    Returns dict mapping result types to details:
    {
        'eigenvalues': {'file': 'xxx.op2', 'count': 10, 'solution': 'Modal'},
        'displacements': {'file': 'yyy.op2', 'count': 613, 'solution': 'Static'},
        'stress_quad4': {'file': 'yyy.op2', 'count': 561, 'solution': 'Static'},
        ...
    }
    """
    from pyNastran.op2.op2 import OP2

    available = {}

    # Find all .op2 files
    op2_files = list(model_dir.glob("*.op2"))

    for op2_file in op2_files:
        try:
            model = OP2()
            model.read_op2(str(op2_file))

            # Check for eigenvalues
            if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
                subcase = list(model.eigenvalues.keys())[0]
                eig_obj = model.eigenvalues[subcase]
                available['eigenvalues'] = {
                    'file': op2_file.name,
                    'count': len(eig_obj.eigenvalues),
                    'solution': self._guess_solution_from_filename(op2_file.name),
                    'op2_path': op2_file
                }

            # Check for displacements
            if hasattr(model, 'displacements') and len(model.displacements) > 0:
                subcase = list(model.displacements.keys())[0]
                disp_obj = model.displacements[subcase]
                # assumes disp_obj.data is (ntimes, nnodes, 6) — TODO confirm
                available['displacements'] = {
                    'file': op2_file.name,
                    'count': disp_obj.data.shape[1],  # Number of nodes
                    'solution': self._guess_solution_from_filename(op2_file.name),
                    'op2_path': op2_file
                }

            # Check for stresses (CQUAD4 shell elements only)
            if hasattr(model, 'cquad4_stress') and len(model.cquad4_stress) > 0:
                subcase = list(model.cquad4_stress.keys())[0]
                stress_obj = model.cquad4_stress[subcase]
                available['stress_quad4'] = {
                    'file': op2_file.name,
                    'count': stress_obj.data.shape[1],  # Number of elements
                    'solution': self._guess_solution_from_filename(op2_file.name),
                    'op2_path': op2_file
                }

            # Check for forces (CQUAD4 shell elements only)
            if hasattr(model, 'cquad4_force') and len(model.cquad4_force) > 0:
                # NOTE: 'count' here is the number of subcases, unlike the
                # element/node counts above.
                available['force_quad4'] = {
                    'file': op2_file.name,
                    'count': len(model.cquad4_force),
                    'solution': self._guess_solution_from_filename(op2_file.name),
                    'op2_path': op2_file
                }

        except Exception as e:
            # Best-effort scan: a corrupt or partial .op2 must not abort the
            # analysis of the remaining files.
            print(f"  [WARNING] Could not analyze {op2_file.name}: {e}")

    return available
def _guess_solution_from_filename(self, filename: str) -> str:
"""Guess solution type from filename."""
filename_lower = filename.lower()
if 'normal_modes' in filename_lower or 'modal' in filename_lower:
return 'Solution_Normal_Modes'
elif 'buckling' in filename_lower:
return 'Solution_Buckling'
elif 'static' in filename_lower or 'solution_1' in filename_lower:
return 'Solution_1'
else:
return 'Unknown'
def _match_objectives_to_results(
self,
workflow: Dict[str, Any],
available_results: Dict[str, Any],
solutions_info: Dict[str, Any]
) -> Dict[str, Any]:
"""
Intelligently match workflow objectives to available results.
Returns:
{
'objectives': {
'obj_name': {
'solution': 'Solution_Normal_Modes',
'result_type': 'eigenvalues',
'extractor': 'extract_first_frequency',
'op2_file': Path(...)
}
},
'primary_solution': 'Solution_Normal_Modes' # Most important solution
}
"""
mapping = {
'objectives': {},
'primary_solution': None
}
for obj in workflow.get('objectives', []):
obj_name = obj.get('name', 'unnamed')
extraction = obj.get('extraction', {})
action = extraction.get('action', '').lower()
# Match based on objective type
if 'frequency' in action or 'eigenvalue' in action or 'modal' in action:
if 'eigenvalues' in available_results:
result_info = available_results['eigenvalues']
mapping['objectives'][obj_name] = {
'solution': result_info['solution'],
'result_type': 'eigenvalues',
'extractor': 'extract_first_frequency',
'op2_file': result_info['op2_path'],
'match_confidence': 'HIGH'
}
if not mapping['primary_solution']:
mapping['primary_solution'] = result_info['solution']
else:
mapping['objectives'][obj_name] = {
'solution': 'NONE',
'result_type': 'eigenvalues',
'extractor': 'extract_first_frequency',
'op2_file': None,
'match_confidence': 'ERROR',
'error': 'No eigenvalue results found - check if modal solution exists'
}
elif 'displacement' in action or 'deflection' in action:
if 'displacements' in available_results:
result_info = available_results['displacements']
mapping['objectives'][obj_name] = {
'solution': result_info['solution'],
'result_type': 'displacements',
'extractor': 'extract_max_displacement',
'op2_file': result_info['op2_path'],
'match_confidence': 'HIGH'
}
if not mapping['primary_solution']:
mapping['primary_solution'] = result_info['solution']
elif 'stress' in action or 'von_mises' in action:
if 'stress_quad4' in available_results:
result_info = available_results['stress_quad4']
mapping['objectives'][obj_name] = {
'solution': result_info['solution'],
'result_type': 'stress',
'extractor': 'extract_max_stress',
'op2_file': result_info['op2_path'],
'match_confidence': 'HIGH'
}
if not mapping['primary_solution']:
mapping['primary_solution'] = result_info['solution']
return mapping
def generate_intelligent_runner(
    self,
    study_dir: Path,
    workflow: Dict[str, Any],
    benchmark_results: Dict[str, Any]
) -> Path:
    """
    Generate optimized runner based on intelligent analysis.

    Uses benchmark results to:
    1. Select correct solution to solve
    2. Generate correct extractors
    3. Optimize for speed (only solve what's needed)

    Args:
        study_dir: Study directory; the runner is written there as
            run_optimization.py.
        workflow: Workflow config (study name, objectives, design variables).
        benchmark_results: Output of the benchmarking phase; supplies
            'recommended_solution' and 'objective_mapping'.

    Returns:
        Path to the generated runner script.
    """
    runner_path = study_dir / "run_optimization.py"

    # Get recommended solution
    recommended_solution = benchmark_results.get('recommended_solution', 'Solution_1')
    objective_mapping = benchmark_results.get('objective_mapping', {})

    # Generate extractor functions based on actual available results
    extractor_code = self._generate_intelligent_extractors(objective_mapping)

    # NOTE: single-brace fields below are interpolated NOW (baked into the
    # generated script); doubled braces survive as literal braces so they
    # are evaluated when the generated runner executes.
    runner_code = f'''"""
Auto-generated INTELLIGENT optimization runner
Created: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Intelligently configured based on complete benchmarking:
- Solution: {recommended_solution}
- Extractors: Auto-matched to available results
"""

import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

import json
import optuna
from optimization_engine.nx_updater import NXParameterUpdater
from optimization_engine.nx_solver import NXSolver

{extractor_code}

def main():
    print("="*80)
    print(" {workflow.get('study_name', 'OPTIMIZATION').upper()}")
    print(" Intelligent Setup - Auto-configured")
    print("="*80)
    print()

    # Load workflow
    config_file = Path(__file__).parent / "1_setup/workflow_config.json"
    with open(config_file) as f:
        workflow = json.load(f)

    print("Configuration:")
    print(f" Target solution: {recommended_solution}")
    print(f" Objectives: {len(workflow.get('objectives', []))}")
    print(f" Variables: {len(workflow.get('design_variables', []))}")
    print()

    # Setup paths
    model_dir = Path(__file__).parent / "1_setup/model"
    prt_file = list(model_dir.glob("*.prt"))[0]
    sim_file = list(model_dir.glob("*.sim"))[0]
    output_dir = Path(__file__).parent / "2_substudies/results"
    output_dir.mkdir(parents=True, exist_ok=True)

    # Initialize
    updater = NXParameterUpdater(prt_file)
    solver = NXSolver()

    # Create Optuna study
    study_name = "{workflow.get('study_name', 'optimization')}"
    storage = f"sqlite:///{{output_dir / 'study.db'}}"
    study = optuna.create_study(
        study_name=study_name,
        storage=storage,
        load_if_exists=True,
        direction="minimize"
    )

    def objective(trial):
        # Sample design variables
        params = {{}}
        for var in workflow['design_variables']:
            name = var['parameter']
            bounds = var['bounds']
            params[name] = trial.suggest_float(name, bounds[0], bounds[1])

        print(f"\\nTrial {{trial.number}}:")
        for name, value in params.items():
            print(f" {{name}} = {{value:.2f}}")

        # Update model
        updater.update_expressions(params)

        # Run SPECIFIC solution (optimized - only what's needed)
        result = solver.run_simulation(
            sim_file,
            solution_name="{recommended_solution}"
        )

        if not result['success']:
            raise RuntimeError(f"Simulation failed: {{result.get('errors', 'Unknown')}}")

        op2_file = result['op2_file']

        # Extract results
        results = extract_results(op2_file, workflow)

        # Print results
        for name, value in results.items():
            print(f" {{name}} = {{value:.4f}}")

        # Calculate objective
        obj_config = workflow['objectives'][0]
        result_name = list(results.keys())[0]

        if obj_config['goal'] == 'minimize':
            objective_value = results[result_name]
        else:
            objective_value = -results[result_name]

        print(f" Objective = {{objective_value:.4f}}")
        return objective_value

    # Run optimization
    n_trials = 10
    print(f"\\nRunning {{n_trials}} trials...")
    print("="*80)
    print()

    study.optimize(objective, n_trials=n_trials)

    # Results
    print()
    print("="*80)
    print(" OPTIMIZATION COMPLETE")
    print("="*80)
    print()
    print(f"Best trial: #{{study.best_trial.number}}")
    for name, value in study.best_params.items():
        print(f" {{name}} = {{value:.2f}}")
    print(f"\\nBest objective = {{study.best_value:.4f}}")
    print()

if __name__ == "__main__":
    main()
'''

    with open(runner_path, 'w') as f:
        f.write(runner_code)

    return runner_path
def _generate_intelligent_extractors(self, objective_mapping: Dict[str, Any]) -> str:
"""Generate extractor functions based on intelligent mapping."""
extractors = set()
for obj_name, obj_info in objective_mapping.get('objectives', {}).items():
if 'extractor' in obj_info:
extractors.add(obj_info['extractor'])
code = '''
def extract_results(op2_file, workflow):
"""Intelligently extract results based on benchmarking."""
from pyNastran.op2.op2 import OP2
import numpy as np
model = OP2()
model.read_op2(str(op2_file))
results = {}
'''
if 'extract_first_frequency' in extractors:
code += '''
# Extract first frequency (auto-matched to eigenvalues)
if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
subcase = list(model.eigenvalues.keys())[0]
eig_obj = model.eigenvalues[subcase]
eigenvalue = eig_obj.eigenvalues[0]
angular_freq = np.sqrt(eigenvalue)
frequency_hz = angular_freq / (2 * np.pi)
results['first_frequency'] = float(frequency_hz)
'''
if 'extract_max_displacement' in extractors:
code += '''
# Extract max displacement (auto-matched to displacements)
if hasattr(model, 'displacements') and len(model.displacements) > 0:
subcase = list(model.displacements.keys())[0]
disp_obj = model.displacements[subcase]
translations = disp_obj.data[0, :, :3]
magnitudes = np.linalg.norm(translations, axis=1)
results['max_displacement'] = float(np.max(magnitudes))
'''
if 'extract_max_stress' in extractors:
code += '''
# Extract max stress (auto-matched to stress results)
if hasattr(model, 'cquad4_stress') and len(model.cquad4_stress) > 0:
subcase = list(model.cquad4_stress.keys())[0]
stress_obj = model.cquad4_stress[subcase]
von_mises = stress_obj.data[0, :, 7]
results['max_stress'] = float(np.max(von_mises))
'''
code += '''
return results
'''
return code
if __name__ == "__main__":
    # Example usage — the paths below are placeholders; point them at a real
    # NX model to actually run the benchmarking pipeline.
    setup = IntelligentSetup()

    # Run complete analysis (expressions -> solve -> result scan -> mapping)
    results = setup.run_complete_benchmarking(
        prt_file=Path("path/to/model.prt"),
        sim_file=Path("path/to/model.sim"),
        workflow={'objectives': [{'name': 'freq', 'extraction': {'action': 'extract_frequency'}}]}
    )

    # default=str handles non-JSON-serializable values such as Path objects
    print("Analysis complete:")
    print(json.dumps(results, indent=2, default=str))

View File

@@ -0,0 +1,19 @@
"""
Model Discovery Module
Tools for parsing and analyzing Siemens NX FEA model files:
- SimFileParser: Parse .sim files (both XML and binary)
- discover_fea_model: Main function to analyze model capabilities
"""
from .model_discovery import (
SimFileParser,
discover_fea_model,
format_discovery_result_for_llm,
)
__all__ = [
"SimFileParser",
"discover_fea_model",
"format_discovery_result_for_llm",
]

View File

@@ -0,0 +1,621 @@
"""
MCP Tool: FEA Model Discovery
Parses Siemens NX .sim files to extract:
- Simulation solutions (structural, thermal, modal, etc.)
- Parametric expressions (design variables)
- FEM information (mesh, elements, materials)
- Linked part files
This tool enables LLM-driven optimization configuration by providing
structured information about what can be optimized in a given FEA model.
"""
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, Any, List, Optional
import json
import re
class SimFileParser:
    """
    Parser for Siemens NX .sim (simulation) files.

    IMPORTANT: Real NX .sim files are BINARY (not XML) in NX 12+.
    The parser uses two approaches:
    1. XML parsing for test/legacy files
    2. Binary string extraction for real NX files

    .sim files contain references to:
    - Parent .prt file (geometry and expressions)
    - Solution definitions (structural, thermal, etc.)
    - FEM (mesh, materials, loads, constraints)
    - Solver settings
    """

    def __init__(self, sim_path: Path):
        """
        Initialize parser with path to .sim file.

        Args:
            sim_path: Absolute path to .sim file

        Raises:
            FileNotFoundError: If sim file doesn't exist
            ValueError: If file is not a valid .sim file (wrong extension,
                or unparseable as both XML and binary)
        """
        self.sim_path = Path(sim_path)
        if not self.sim_path.exists():
            raise FileNotFoundError(f"Sim file not found: {sim_path}")
        if self.sim_path.suffix.lower() != '.sim':
            raise ValueError(f"Not a .sim file: {sim_path}")

        # State filled in by _parse_file(): either an XML tree (legacy files)
        # or a list of printable strings scraped from the binary payload.
        self.tree = None
        self.root = None
        self.is_binary = False
        self.sim_strings = []  # Extracted strings from binary file

        self._parse_file()

    def _parse_file(self):
        """
        Parse the .sim file - handles both XML (test files) and binary (real NX files).
        """
        # First, try XML parsing
        try:
            self.tree = ET.parse(self.sim_path)
            self.root = self.tree.getroot()
            self.is_binary = False
            return
        except ET.ParseError:
            # Not XML, must be binary - this is normal for real NX files
            pass

        # Binary file - extract readable strings
        try:
            with open(self.sim_path, 'rb') as f:
                content = f.read()

            # Extract strings (sequences of printable ASCII characters)
            # Minimum length of 4 to avoid noise.
            # latin-1 maps every byte to a code point, so decoding cannot fail.
            text_content = content.decode('latin-1', errors='ignore')
            self.sim_strings = re.findall(r'[\x20-\x7E]{4,}', text_content)
            self.is_binary = True

        except Exception as e:
            raise ValueError(f"Failed to parse .sim file (tried both XML and binary): {e}")

    def extract_solutions(self) -> List[Dict[str, Any]]:
        """
        Extract solution definitions from .sim file.

        Returns:
            List of solution dictionaries with type, name, solver info.
            Always contains at least one entry: a generic default is appended
            when nothing could be recognized.
        """
        solutions = []

        if not self.is_binary and self.root is not None:
            # XML parsing — accept several element names used across NX versions
            for solution_tag in ['Solution', 'AnalysisSolution', 'SimSolution']:
                for elem in self.root.iter(solution_tag):
                    solution_info = {
                        'name': elem.get('name', 'Unknown'),
                        'type': elem.get('type', 'Unknown'),
                        'solver': elem.get('solver', 'NX Nastran'),
                        'description': elem.get('description', ''),
                    }
                    solutions.append(solution_info)
        else:
            # Binary parsing - look for solution type indicators
            # (Nastran SOL sequence numbers embedded in the binary payload)
            solution_types = {
                'SOL 101': 'Linear Statics',
                'SOL 103': 'Normal Modes',
                'SOL 106': 'Nonlinear Statics',
                'SOL 108': 'Direct Frequency Response',
                'SOL 109': 'Direct Transient Response',
                'SOL 111': 'Modal Frequency Response',
                'SOL 112': 'Modal Transient Response',
                'SOL 200': 'Design Optimization',
            }

            found_solutions = set()
            for s in self.sim_strings:
                for sol_id, sol_type in solution_types.items():
                    if sol_id in s:
                        found_solutions.add(sol_type)

            # Also check for solution names in strings
            # (len < 50 filters out long runs of binary noise)
            for s in self.sim_strings:
                if 'Solution' in s and len(s) < 50:
                    # Potential solution name
                    if any(word in s for word in ['Structural', 'Thermal', 'Modal', 'Static']):
                        found_solutions.add(s.strip())

            for sol_name in found_solutions:
                solutions.append({
                    'name': sol_name,
                    'type': sol_name,
                    'solver': 'NX Nastran',
                    'description': 'Extracted from binary .sim file'
                })

        # Default if nothing found
        if not solutions:
            solutions.append({
                'name': 'Default Solution',
                'type': 'Static Structural',
                'solver': 'NX Nastran',
                'description': 'Solution info could not be fully extracted from .sim file'
            })

        return solutions

    def extract_expressions(self) -> List[Dict[str, Any]]:
        """
        Extract expression references from .sim file.

        Note: Actual expression values are stored in the .prt file.
        This method extracts references and attempts to read from .prt if available.
        When a .prt is found, its expressions override same-named entries from
        the .sim XML.

        Returns:
            List of expression dictionaries with name, value, units
        """
        expressions = []

        # XML parsing - look for expression elements
        if not self.is_binary and self.root is not None:
            for expr_elem in self.root.iter('Expression'):
                expr_info = {
                    'name': expr_elem.get('name', ''),
                    'value': expr_elem.get('value', None),
                    'units': expr_elem.get('units', ''),
                    'formula': expr_elem.text if expr_elem.text else None
                }
                if expr_info['name']:
                    expressions.append(expr_info)

        # Try to read from associated .prt file (works for both XML and binary .sim)
        # Try multiple naming patterns:
        # 1. Same name as .sim: Bracket_sim1.prt
        # 2. Base name: Bracket.prt
        # 3. With _i suffix: Bracket_sim1_i.prt
        prt_paths = [
            self.sim_path.with_suffix('.prt'),  # Bracket_sim1.prt
            self.sim_path.parent / f"{self.sim_path.stem.split('_')[0]}.prt",  # Bracket.prt
            self.sim_path.parent / f"{self.sim_path.stem}_i.prt",  # Bracket_sim1_i.prt
        ]

        for prt_path in prt_paths:
            if prt_path.exists():
                prt_expressions = self._extract_prt_expressions(prt_path)
                # Merge with existing, prioritizing .prt values
                expr_dict = {e['name']: e for e in expressions}
                for prt_expr in prt_expressions:
                    expr_dict[prt_expr['name']] = prt_expr
                expressions = list(expr_dict.values())
                break  # Use first .prt file found

        return expressions

    def _extract_prt_expressions(self, prt_path: Path) -> List[Dict[str, Any]]:
        """
        Extract expressions from associated .prt file.

        .prt files are binary, but expression data is stored in readable sections.
        NX expression format: #(Type [units]) name: value;

        Parsing is best-effort: on any failure a warning is printed and the
        expressions collected so far (possibly none) are returned.

        Args:
            prt_path: Path to .prt file

        Returns:
            List of expression dictionaries
        """
        expressions = []

        try:
            # Read as binary and search for text patterns
            with open(prt_path, 'rb') as f:
                content = f.read()

            # Try to decode as latin-1 (preserves all byte values)
            text_content = content.decode('latin-1', errors='ignore')

            # Pattern 1: NX native format with variations:
            # #(Number [mm]) tip_thickness: 20;
            # (Number [mm]) p3: 10;
            # *(Number [mm]) support_blend_radius: 10;
            # ((Number [degrees]) support_angle: 30;
            # Prefix can be: #(, *(, (, ((
            nx_pattern = r'[#*\(]*\((\w+)\s*\[([^\]]*)\]\)\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*:\s*([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?)'

            # Use set to avoid duplicates (the same expression text can occur
            # multiple times inside the binary payload)
            expr_names_seen = set()

            for match in re.finditer(nx_pattern, text_content):
                expr_type, units, name, value = match.groups()
                if name not in expr_names_seen:
                    expr_names_seen.add(name)
                    expressions.append({
                        'name': name,
                        'value': float(value),
                        'units': units,
                        'type': expr_type,
                        'source': 'prt_file_nx_format'
                    })

            # Pattern 2: Find expression names from Root: references
            # Format: Root:expression_name:
            root_pattern = r'Root:([a-zA-Z_][a-zA-Z0-9_]{2,}):'
            potential_expr_names = set()

            for match in re.finditer(root_pattern, text_content):
                name = match.group(1)
                # Filter out common NX internal names
                if name not in ['index', '%%Name', '%%ug_objects_for_', 'WorldModifier']:
                    if not name.startswith('%%'):
                        potential_expr_names.add(name)

            # For names found in Root: but not in value patterns,
            # mark as "found but value unknown"
            for name in potential_expr_names:
                if name not in expr_names_seen:
                    expressions.append({
                        'name': name,
                        'value': None,
                        'units': '',
                        'type': 'Unknown',
                        'source': 'prt_file_reference_only'
                    })

            # Pattern 3: Fallback - simple name=value pattern
            # Only use if no NX-format expressions found
            if not expressions:
                simple_pattern = r'([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?)'
                for match in re.finditer(simple_pattern, text_content):
                    name, value = match.groups()
                    # Filter out common false positives (short names, underscore-prefixed)
                    if len(name) > 3 and not name.startswith('_'):
                        # Additional filter: avoid Nastran keywords
                        if name.upper() not in ['PRINT', 'PUNCH', 'PLOT', 'BOTH', 'GRID', 'GAUSS']:
                            expressions.append({
                                'name': name,
                                'value': float(value),
                                'units': '',
                                'source': 'prt_file_simple_pattern'
                            })

        except Exception as e:
            # .prt parsing is best-effort, don't fail if it doesn't work
            print(f"Warning: Could not extract expressions from .prt file: {e}")

        return expressions

    def extract_fem_info(self) -> Dict[str, Any]:
        """
        Extract FEM (finite element model) information.

        For binary .sim files the data is read from a sibling .fem file when
        one can be located; otherwise only a note is returned.

        Returns:
            Dictionary with mesh, material, and element info
        """
        fem_info = {
            'mesh': {},
            'materials': [],
            'element_types': [],
            'loads': [],
            'constraints': []
        }

        if not self.is_binary and self.root is not None:
            # XML parsing
            for mesh_elem in self.root.iter('Mesh'):
                fem_info['mesh'] = {
                    'name': mesh_elem.get('name', 'Default Mesh'),
                    'element_size': mesh_elem.get('element_size', 'Unknown'),
                    'node_count': mesh_elem.get('node_count', 'Unknown'),
                    'element_count': mesh_elem.get('element_count', 'Unknown')
                }

            for mat_elem in self.root.iter('Material'):
                material = {
                    'name': mat_elem.get('name', 'Unknown'),
                    'type': mat_elem.get('type', 'Isotropic'),
                    'properties': {}
                }
                for prop in ['youngs_modulus', 'poissons_ratio', 'density', 'yield_strength']:
                    if mat_elem.get(prop):
                        material['properties'][prop] = mat_elem.get(prop)
                fem_info['materials'].append(material)

            for elem_type in self.root.iter('ElementType'):
                fem_info['element_types'].append(elem_type.get('type', 'Unknown'))

            for load_elem in self.root.iter('Load'):
                load = {
                    'name': load_elem.get('name', 'Unknown'),
                    'type': load_elem.get('type', 'Force'),
                    'magnitude': load_elem.get('magnitude', 'Unknown')
                }
                fem_info['loads'].append(load)

            for constraint_elem in self.root.iter('Constraint'):
                constraint = {
                    'name': constraint_elem.get('name', 'Unknown'),
                    'type': constraint_elem.get('type', 'Fixed'),
                }
                fem_info['constraints'].append(constraint)
        else:
            # Binary parsing - extract from .fem file if available
            # e.g. Bracket_sim1.sim -> Bracket_fem1.fem
            fem_path = self.sim_path.with_name(self.sim_path.stem.replace('_sim', '_fem') + '.fem')
            if not fem_path.exists():
                # Try alternative naming patterns
                fem_path = self.sim_path.parent / f"{self.sim_path.stem.split('_')[0]}_fem1.fem"

            if fem_path.exists():
                fem_info = self._extract_fem_from_fem_file(fem_path)
            else:
                # Extract what we can from .sim strings
                fem_info['note'] = 'Limited FEM info available from binary .sim file'

        return fem_info

    def _extract_fem_from_fem_file(self, fem_path: Path) -> Dict[str, Any]:
        """
        Extract FEM information from .fem file.

        Scrapes readable text out of the binary .fem file for mesh name,
        material names, and Nastran element type keywords.

        Args:
            fem_path: Path to .fem file

        Returns:
            Dictionary with FEM information
        """
        fem_info = {
            'mesh': {},
            'materials': [],
            'element_types': set(),
            'loads': [],
            'constraints': []
        }

        try:
            with open(fem_path, 'rb') as f:
                content = f.read()

            text_content = content.decode('latin-1', errors='ignore')

            # Look for mesh metadata
            mesh_match = re.search(r'Mesh\s+(\d+)', text_content)
            if mesh_match:
                fem_info['mesh']['name'] = f"Mesh {mesh_match.group(1)}"

            # Look for material names
            for material_match in re.finditer(r'MAT\d+\s+([A-Za-z0-9_\-\s]+)', text_content):
                mat_name = material_match.group(1).strip()
                if mat_name and len(mat_name) > 2:
                    fem_info['materials'].append({
                        'name': mat_name,
                        'type': 'Unknown',
                        'properties': {}
                    })

            # Look for element types (Nastran format: CQUAD4, CTRIA3, CTETRA, etc.)
            element_pattern = r'\b(C[A-Z]{3,6}\d?)\b'
            for elem_match in re.finditer(element_pattern, text_content):
                elem_type = elem_match.group(1)
                if elem_type.startswith('C') and len(elem_type) <= 8:
                    fem_info['element_types'].add(elem_type)

            fem_info['element_types'] = list(fem_info['element_types'])

        except Exception as e:
            # NOTE(review): if an exception fires before the list() conversion
            # above, 'element_types' is returned as a set, which is not
            # JSON-serializable — confirm callers tolerate this.
            fem_info['note'] = f'Could not fully parse .fem file: {e}'

        return fem_info

    def get_linked_files(self) -> Dict[str, str]:
        """
        Get paths to linked files (.prt, result files, etc.)

        Only files that actually exist on disk are included.

        Returns:
            Dictionary mapping file type to path
        """
        linked_files = {}

        # .prt file (geometry and expressions)
        prt_path = self.sim_path.with_suffix('.prt')
        if prt_path.exists():
            linked_files['part_file'] = str(prt_path)

        # Common result file locations
        result_dir = self.sim_path.parent
        sim_name = self.sim_path.stem

        # Nastran result files (binary results, listings, input deck)
        for ext in ['.op2', '.f06', '.f04', '.bdf']:
            result_file = result_dir / f"{sim_name}{ext}"
            if result_file.exists():
                linked_files[f'result{ext}'] = str(result_file)

        return linked_files
def discover_fea_model(sim_file_path: str) -> Dict[str, Any]:
    """
    MCP Tool: Discover FEA Model

    Inspects a Siemens NX .sim file and reports everything relevant for
    LLM-driven optimization setup:
    - Solutions (analysis types)
    - Expressions (potential design variables)
    - FEM information (mesh, materials, loads)
    - Linked files

    Args:
        sim_file_path: Absolute path to .sim file (Windows or Unix format)

    Returns:
        On success, a dict with status='success' plus solutions, expressions,
        fem_info, linked_files, metadata and summary counts. On failure, a
        dict with status='error', an error_type, message and suggestion.

    Example:
        >>> result = discover_fea_model("C:/Projects/Bracket/analysis.sim")
        >>> print(result['expressions'])
        [{'name': 'wall_thickness', 'value': 5.0, 'units': 'mm'}, ...]
    """
    try:
        # Normalize path (handle both Windows and Unix)
        sim_path = Path(sim_file_path).resolve()

        # Parse the .sim file and pull out each component
        parser = SimFileParser(sim_path)
        solutions = parser.extract_solutions()
        expressions = parser.extract_expressions()
        fem_info = parser.extract_fem_info()

        result = {
            'status': 'success',
            'sim_file': str(sim_path),
            'file_exists': sim_path.exists(),
            'solutions': solutions,
            'expressions': expressions,
            'fem_info': fem_info,
            'linked_files': parser.get_linked_files(),
            'metadata': {
                'parser_version': '0.1.0',
                'nx_version': 'NX 2412',  # Can be extracted from .sim file in future
            }
        }

        # Add summary statistics
        result['summary'] = {
            'solution_count': len(solutions),
            'expression_count': len(expressions),
            'material_count': len(fem_info['materials']),
            'load_count': len(fem_info['loads']),
            'constraint_count': len(fem_info['constraints']),
        }
        return result

    except FileNotFoundError as e:
        return {
            'status': 'error',
            'error_type': 'file_not_found',
            'message': str(e),
            'suggestion': 'Check that the file path is absolute and the .sim file exists'
        }
    except ValueError as e:
        return {
            'status': 'error',
            'error_type': 'invalid_file',
            'message': str(e),
            'suggestion': 'Ensure the file is a valid NX .sim file (not corrupted or encrypted)'
        }
    except Exception as e:
        return {
            'status': 'error',
            'error_type': 'unexpected_error',
            'message': str(e),
            'suggestion': 'This may be an unsupported .sim file format. Please report this issue.'
        }
def format_discovery_result_for_llm(result: Dict[str, Any]) -> str:
    """
    Render a discover_fea_model() result as Markdown for LLM consumption.

    Error results collapse to a single error line; success results get
    sections for solutions, expressions, FEM info, and linked files.

    Args:
        result: Output from discover_fea_model()

    Returns:
        Markdown-formatted string
    """
    if result['status'] != 'success':
        return f"❌ **Error**: {result['message']}\n\n💡 {result['suggestion']}"

    lines = []
    emit = lines.append

    emit(f"# FEA Model Analysis\n")
    emit(f"**File**: `{result['sim_file']}`\n")

    # Solutions section
    emit(f"## Solutions ({result['summary']['solution_count']})\n")
    for sol in result['solutions']:
        emit(f"- **{sol['name']}** ({sol['type']}) - Solver: {sol['solver']}")
        if sol['description']:
            emit(f" - {sol['description']}")
    emit("")

    # Expressions (design variables) as a Markdown table
    emit(f"## Expressions ({result['summary']['expression_count']})\n")
    if not result['expressions']:
        emit("⚠️ No expressions found. Model may not be parametric.")
    else:
        emit("| Name | Value | Units |")
        emit("|------|-------|-------|")
        for expr in result['expressions']:
            value = expr.get('value', 'N/A')
            units = expr.get('units', '')
            emit(f"| `{expr['name']}` | {value} | {units} |")
    emit("")

    # FEM Information
    fem = result['fem_info']
    emit(f"## FEM Information\n")
    if fem['mesh']:
        emit(f"**Mesh**: {fem['mesh'].get('name', 'Unknown')}")
        emit(f"- Nodes: {fem['mesh'].get('node_count', 'Unknown')}")
        emit(f"- Elements: {fem['mesh'].get('element_count', 'Unknown')}")
        emit("")

    # Materials, loads and constraints share one "name (type)" bullet format
    for label, entries in (
        ("Materials", fem['materials']),
        ("Loads", fem['loads']),
        ("Constraints", fem['constraints']),
    ):
        if entries:
            emit(f"**{label}** ({len(entries)})")
            for entry in entries:
                emit(f"- {entry['name']} ({entry['type']})")
            emit("")

    # Linked Files
    if result['linked_files']:
        emit(f"## Linked Files\n")
        for file_type, file_path in result['linked_files'].items():
            emit(f"- **{file_type}**: `{file_path}`")
        emit("")

    return "\n".join(lines)
# For testing/debugging
if __name__ == "__main__":
    import sys

    args = sys.argv[1:]
    if not args:
        print("Usage: python model_discovery.py <path_to_sim_file>")
        sys.exit(1)

    # Run discovery on the given .sim file and show both the LLM-facing
    # Markdown and the raw JSON payload.
    result = discover_fea_model(args[0])
    if result['status'] != 'success':
        print(f"Error: {result['message']}")
    else:
        print(format_discovery_result_for_llm(result))
        print("\n" + "=" * 60)
        print("JSON Output:")
        print(json.dumps(result, indent=2))
View File

@@ -339,7 +339,7 @@ class OptimizationConfigBuilder:
# Example usage
if __name__ == "__main__":
from mcp_server.tools.model_discovery import discover_fea_model
from optimization_engine.model_discovery import discover_fea_model
# Step 1: Discover model
print("Step 1: Discovering FEA model...")

View File

@@ -1,66 +0,0 @@
"""
Pluggable Result Extractor System
Base classes and implementations for extracting metrics from FEA results.
"""
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
from pathlib import Path
class ResultExtractor(ABC):
    """Abstract interface for FEA result extractors.

    Concrete subclasses parse solver output files (OP2, F06, ...) and
    return named scalar metrics for the optimization loop.
    """

    @abstractmethod
    def extract(self, result_files: Dict[str, Path], config: Dict[str, Any]) -> Dict[str, float]:
        """
        Extract metrics from FEA results.

        Args:
            result_files: Maps file types to paths, e.g. {'op2': Path(...), 'f06': Path(...)}
            config: Extractor-specific configuration parameters

        Returns:
            Mapping of metric names to values
        """

    @property
    @abstractmethod
    def required_files(self) -> list[str]:
        """File types this extractor needs (e.g. ['op2'] or ['f06'])."""

    @property
    def name(self) -> str:
        """Registry key: class name minus the 'Extractor' suffix, lowercased."""
        return type(self).__name__.replace("Extractor", "").lower()
# Registry of available extractors, keyed by ResultExtractor.name.
_EXTRACTOR_REGISTRY: Dict[str, type[ResultExtractor]] = {}


def register_extractor(extractor_class: type[ResultExtractor]) -> type[ResultExtractor]:
    """Class decorator: register an extractor under its instance `name`.

    The class is instantiated once because `name` is an instance-level
    property, not a class attribute.
    """
    _EXTRACTOR_REGISTRY[extractor_class().name] = extractor_class
    return extractor_class


def get_extractor(name: str) -> Optional[type[ResultExtractor]]:
    """Return the registered extractor class for `name`, or None if unknown."""
    return _EXTRACTOR_REGISTRY.get(name)


def list_extractors() -> list[str]:
    """Names of all registered extractors."""
    return [*_EXTRACTOR_REGISTRY]


__all__ = [
    "ResultExtractor",
    "register_extractor",
    "get_extractor",
    "list_extractors",
]

View File

@@ -1,207 +0,0 @@
"""
Result Extractors
Wrapper functions that integrate with the optimization runner.
These extract optimization metrics from NX Nastran result files.
"""
from pathlib import Path
from typing import Dict, Any
import sys
# Add project root to path
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))
from optimization_engine.result_extractors.op2_extractor_example import (
extract_max_displacement,
extract_max_stress,
extract_mass
)
def mass_extractor(result_path: Path) -> Dict[str, float]:
    """
    Extract mass metrics for optimization.

    Args:
        result_path: Path to .op2 file or directory containing results

    Returns:
        Dict with 'total_mass' and other mass-related metrics

    Raises:
        FileNotFoundError: If no result file can be located.
        ValueError: If the OP2 contains no total_mass value.
    """
    # If result_path is a directory, pick an .op2 file deterministically.
    if result_path.is_dir():
        # Fix: Path.glob() order is filesystem-dependent; sorting makes the
        # "first .op2 file" choice stable across platforms and runs.
        op2_files = sorted(result_path.glob("*.op2"))
        if not op2_files:
            raise FileNotFoundError(f"No .op2 files found in {result_path}")
        op2_path = op2_files[0]
    else:
        op2_path = result_path
    if not op2_path.exists():
        raise FileNotFoundError(f"Result file not found: {op2_path}")
    result = extract_mass(op2_path)
    # Ensure total_mass key exists
    if 'total_mass' not in result or result['total_mass'] is None:
        raise ValueError(f"Could not extract total_mass from {op2_path}")
    return result
def stress_extractor(result_path: Path) -> Dict[str, float]:
    """
    Extract stress metrics for optimization.

    Args:
        result_path: Path to .op2 file or directory containing results

    Returns:
        Dict with 'max_von_mises' and other stress metrics
    """
    # A directory is resolved to the first .op2 file found inside it.
    op2_path = result_path
    if result_path.is_dir():
        candidates = list(result_path.glob("*.op2"))
        if not candidates:
            raise FileNotFoundError(f"No .op2 files found in {result_path}")
        op2_path = candidates[0]
    if not op2_path.exists():
        raise FileNotFoundError(f"Result file not found: {op2_path}")

    result = extract_max_stress(op2_path, stress_type='von_mises')
    # Normalize the key name expected by the optimization runner.
    if 'max_stress' in result:
        result['max_von_mises'] = result['max_stress']
    if result.get('max_von_mises') is None:
        raise ValueError(f"Could not extract max_von_mises from {op2_path}")
    return result
def displacement_extractor(result_path: Path) -> Dict[str, float]:
    """
    Extract displacement metrics for optimization.

    Args:
        result_path: Path to .op2 file or directory containing results

    Returns:
        Dict with 'max_displacement' and other displacement metrics
    """
    # A directory is resolved to the first .op2 file found inside it.
    op2_path = result_path
    if result_path.is_dir():
        candidates = list(result_path.glob("*.op2"))
        if not candidates:
            raise FileNotFoundError(f"No .op2 files found in {result_path}")
        op2_path = candidates[0]
    if not op2_path.exists():
        raise FileNotFoundError(f"Result file not found: {op2_path}")

    result = extract_max_displacement(op2_path)
    if result.get('max_displacement') is None:
        raise ValueError(f"Could not extract max_displacement from {op2_path}")
    return result
def volume_extractor(result_path: Path) -> Dict[str, float]:
    """
    Extract volume metrics for optimization.

    Volume is usually not stored in OP2 files, so this placeholder derives
    it from the extracted mass and an assumed steel density. It could be
    extended to read the .f06 file or query the NX model directly.

    Args:
        result_path: Path to result files

    Returns:
        Dict with 'total_volume'
    """
    mass_metrics = mass_extractor(result_path)
    # Steel: ~7850 kg/m^3 == 7.85e-6 kg/mm^3; volume (mm^3) = mass / density.
    steel_density_kg_per_mm3 = 7.85e-6
    total_mass = mass_metrics['total_mass']
    total_volume = total_mass / steel_density_kg_per_mm3 if total_mass else None
    return {
        'total_volume': total_volume,
        'note': 'Volume estimated from mass using assumed density'
    }
# Registry of all available extractors, keyed by the name used in study configs.
EXTRACTOR_REGISTRY = {
    'mass_extractor': mass_extractor,
    'stress_extractor': stress_extractor,
    'displacement_extractor': displacement_extractor,
    'volume_extractor': volume_extractor
}


def get_extractor(extractor_name: str):
    """
    Look up an extractor function by name.

    Args:
        extractor_name: Name of the extractor

    Returns:
        Extractor function

    Raises:
        ValueError: If extractor not found
    """
    if extractor_name not in EXTRACTOR_REGISTRY:
        available = ', '.join(EXTRACTOR_REGISTRY)
        raise ValueError(f"Unknown extractor: {extractor_name}. Available: {available}")
    return EXTRACTOR_REGISTRY[extractor_name]
# Manual smoke test: run every registered extractor against one result file.
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("Usage: python extractors.py <path_to_op2_file>")
        print("\nAvailable extractors:")
        for name in EXTRACTOR_REGISTRY.keys():
            print(f" - {name}")
        sys.exit(1)

    result_path = Path(sys.argv[1])
    banner = "=" * 60
    print(banner)
    print("TESTING ALL EXTRACTORS")
    print(banner)
    for extractor_name, extractor_func in EXTRACTOR_REGISTRY.items():
        print(f"\n{extractor_name}:")
        try:
            metrics = extractor_func(result_path)
            for key, value in metrics.items():
                print(f" {key}: {value}")
        except Exception as e:
            print(f" Error: {e}")

View File

@@ -1,73 +0,0 @@
"""
Extract element forces from CBAR in Z direction from OP2
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: cbar_force
Element Type: CBAR
Result Type: force
API: model.cbar_force[subcase]
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_cbar_force(op2_file: Path, subcase: int = 1, direction: str = 'Z'):
    """
    Extract forces from CBAR elements.

    Reads the OP2, pulls the CBAR force table for the requested subcase,
    and reports max/avg/min absolute force in the requested direction plus
    the raw per-element values.

    Args:
        op2_file: Path to OP2 file
        subcase: Subcase ID
        direction: Force direction ('X', 'Y', 'Z', 'axial', 'torque').
            Unmapped names (including 'X'/'Y') fall back to column 6 (axial).

    Returns:
        Dict with force statistics

    Raises:
        ValueError: If the OP2 holds no CBAR force results.
    """
    from pyNastran.op2.op2 import OP2
    import numpy as np
    model = OP2()
    model.read_op2(str(op2_file))
    if not hasattr(model, 'cbar_force'):
        raise ValueError("No CBAR force results in OP2")
    force = model.cbar_force[subcase]
    # Assumes a single (static) result step -- TODO confirm for transient runs.
    itime = 0
    # CBAR force data structure:
    # [bending_moment_a1, bending_moment_a2,
    #  bending_moment_b1, bending_moment_b2,
    #  shear1, shear2, axial, torque]
    direction_map = {
        'shear1': 4,
        'shear2': 5,
        'axial': 6,
        'Z': 6,  # Commonly axial is Z direction
        'torque': 7
    }
    # Case-insensitive lookup; any unknown direction silently maps to the
    # axial column (6) rather than raising.
    col_idx = direction_map.get(direction, direction_map.get(direction.lower(), 6))
    forces = force.data[itime, :, col_idx]
    # Statistics are taken on absolute values, so min/max are magnitudes.
    return {
        f'max_{direction}_force': float(np.max(np.abs(forces))),
        f'avg_{direction}_force': float(np.mean(np.abs(forces))),
        f'min_{direction}_force': float(np.min(np.abs(forces))),
        'forces_array': forces.tolist()
    }
if __name__ == '__main__':
    # Example usage
    import sys
    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = extract_cbar_force(op2_file)
        print(f"Extraction result: {result}")
    else:
        # Fix: the usage string was a plain literal, so "{sys.argv[0]}" was
        # printed verbatim; it must be an f-string to show the script name.
        print(f"Usage: python {sys.argv[0]} <op2_file>")

View File

@@ -1,56 +0,0 @@
"""
Extract displacement from OP2
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: displacement
Element Type: General
Result Type: displacement
API: model.displacements[subcase]
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_displacement(op2_file: Path, subcase: int = 1):
    """Extract nodal displacement results from an OP2 file.

    Args:
        op2_file: Path to the OP2 results file.
        subcase: Subcase ID to read (default 1).

    Returns:
        Dict with the peak displacement magnitude, the node where it
        occurs, and the peak absolute X/Y/Z components.
    """
    from pyNastran.op2.op2 import OP2
    import numpy as np

    model = OP2()
    model.read_op2(str(op2_file))
    disp = model.displacements[subcase]

    step = 0  # static case: single time step
    translations = disp.data[step, :, :3]  # columns: [tx, ty, tz]

    # Magnitude of the translation vector per node, then locate the peak.
    magnitudes = np.linalg.norm(translations, axis=1)
    peak_index = int(np.argmax(magnitudes))
    nodes = [nid for (nid, grid_type) in disp.node_gridtype]

    return {
        'max_displacement': float(magnitudes[peak_index]),
        'max_disp_node': int(nodes[peak_index]),
        'max_disp_x': float(np.abs(translations[:, 0]).max()),
        'max_disp_y': float(np.abs(translations[:, 1]).max()),
        'max_disp_z': float(np.abs(translations[:, 2]).max())
    }
if __name__ == '__main__':
    # Example usage
    import sys
    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = extract_displacement(op2_file)
        print(f"Extraction result: {result}")
    else:
        # Fix: the usage string was a plain literal, so "{sys.argv[0]}" was
        # printed verbatim; it must be an f-string to show the script name.
        print(f"Usage: python {sys.argv[0]} <op2_file>")

View File

@@ -1,55 +0,0 @@
"""
Extract expression value from NX .prt file
Used for extracting computed values like mass, volume, etc.
This extractor reads expressions using the .exp export method for accuracy.
"""
from pathlib import Path
from typing import Dict, Any
from optimization_engine.nx_updater import NXParameterUpdater
def extract_expression(prt_file: Path, expression_name: str):
    """
    Read a single expression value out of an NX .prt file.

    Expressions are read via the .exp export method for accuracy.

    Args:
        prt_file: Path to .prt file
        expression_name: Name of expression to extract (e.g., 'p173' for mass)

    Returns:
        Dict with the expression value and its units

    Raises:
        ValueError: If the expression is missing, or is a formula reference
            rather than a computed value.
    """
    reader = NXParameterUpdater(prt_file, backup=False)
    known = reader.get_all_expressions(use_exp_export=True)

    if expression_name not in known:
        raise ValueError(f"Expression '{expression_name}' not found in {prt_file}")
    entry = known[expression_name]

    # A formula-backed expression carries value=None; callers need the
    # computed value, so refuse rather than silently returning None.
    if entry['value'] is None and entry['formula'] is not None:
        raise ValueError(
            f"Expression '{expression_name}' is a formula: {entry['formula']}. "
            f"This extractor requires a computed value, not a formula reference."
        )

    return {
        expression_name: entry['value'],
        f'{expression_name}_units': entry['units']
    }
# Command-line entry point: extract one expression and print it.
if __name__ == '__main__':
    import sys

    if len(sys.argv) <= 2:
        print(f"Usage: python {sys.argv[0]} <prt_file> <expression_name>")
    else:
        value = extract_expression(Path(sys.argv[1]), sys.argv[2])
        print(f"Extraction result: {value}")

View File

@@ -1,127 +0,0 @@
"""
Extract von Mises stress from solid and shell elements
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: element_stress
Element Types: CTETRA, CHEXA, CPENTA (solids), CQUAD4, CTRIA3 (shells)
Result Type: stress
API: model.ctetra_stress[subcase], model.cquad4_stress[subcase], etc.
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_solid_stress(op2_file: Path, subcase: int = 1, element_type: str = 'auto'):
    """
    Extract stress from solid or shell elements.

    Args:
        op2_file: Path to OP2 file
        subcase: Subcase number (default 1)
        element_type: Element type ('ctetra', 'chexa', 'cquad4', 'ctria3', or 'auto')
            'auto' will detect available element types

    Returns:
        Dict with max von Mises stress and element info (values in MPa)

    Raises:
        ValueError: If no stress results exist for the element type/subcase.
    """
    from pyNastran.op2.op2 import OP2
    import numpy as np
    model = OP2()
    model.read_op2(str(op2_file))
    # Auto-detect element type if requested
    if element_type == 'auto':
        # Try common element types in order
        # pyNastran uses "stress.{element}_stress" as attribute names (with dot in name!)
        possible_types = ['cquad4', 'ctria3', 'ctetra', 'chexa', 'cpenta']
        element_type = None
        for elem_type in possible_types:
            stress_attr = f"stress.{elem_type}_stress"
            try:
                stress_dict = getattr(model, stress_attr, None)
                if isinstance(stress_dict, dict) and subcase in stress_dict:
                    element_type = elem_type
                    break
            except AttributeError:
                continue
        if element_type is None:
            raise ValueError(f"No stress results found in OP2 for subcase {subcase}")
    # Get stress object for element type
    # pyNastran stores stress as "stress.{element}_stress" (e.g., "stress.cquad4_stress")
    stress_attr = f"stress.{element_type}_stress"
    try:
        stress_dict = getattr(model, stress_attr)
    except AttributeError:
        raise ValueError(f"No {element_type} stress results in OP2")
    if not isinstance(stress_dict, dict) or subcase not in stress_dict:
        raise ValueError(f"Subcase {subcase} not found in {element_type} stress results")
    stress = stress_dict[subcase]
    # Assumes a single (static) result step -- TODO confirm for transient runs.
    itime = 0
    # Extract von Mises
    # For CQUAD4/CTRIA3 (shells): data shape is [ntimes, nelements, 8]
    # Columns: [fiber_distance, oxx, oyy, txy, angle, omax, omin, von_mises]
    # Column 7 (0-indexed) is von Mises
    #
    # For CTETRA/CHEXA (solids): Column 9 is von Mises
    if element_type in ['cquad4', 'ctria3']:
        # Shell elements - von Mises is at column 7
        von_mises = stress.data[itime, :, 7]
    else:
        # Solid elements - von Mises is at column 9
        von_mises = stress.data[itime, :, 9]
    # Raw stress values from OP2 (in internal Nastran units)
    max_stress_raw = float(np.max(von_mises))
    min_stress_raw = float(np.min(von_mises))
    avg_stress_raw = float(np.mean(von_mises))
    # Convert to MPa
    # For MN-MM unit system (UNITSYS=MN-MM), Nastran outputs stress with implied decimal
    # The raw value needs to be divided by 1000 to get MPa
    # Example: 131507.1875 (raw) = 131.507 MPa
    # NOTE(review): this factor assumes the MN-MM unit system -- confirm
    # against the model's UNITSYS before trusting absolute magnitudes.
    max_stress_mpa = max_stress_raw / 1000.0
    min_stress_mpa = min_stress_raw / 1000.0
    avg_stress_mpa = avg_stress_raw / 1000.0
    # Get element info
    if hasattr(stress, 'element_node'):
        element_ids = stress.element_node[:, 0]  # First column is element ID
    elif hasattr(stress, 'element'):
        element_ids = stress.element
    else:
        # Fallback: no ID table available, so use positional indices.
        element_ids = np.arange(len(von_mises))
    max_stress_elem = int(element_ids[np.argmax(von_mises)])
    return {
        'max_von_mises': max_stress_mpa,
        'min_von_mises': min_stress_mpa,
        'avg_von_mises': avg_stress_mpa,
        'max_stress_element': max_stress_elem,
        'element_type': element_type,
        'num_elements': len(von_mises),
        'units': 'MPa'
    }
if __name__ == '__main__':
    # Example usage
    import sys
    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = extract_solid_stress(op2_file)
        print(f"Extraction result: {result}")
    else:
        # Fix: the usage string was a plain literal, so "{sys.argv[0]}" was
        # printed verbatim; it must be an f-string to show the script name.
        print(f"Usage: python {sys.argv[0]} <op2_file>")

View File

@@ -1,264 +0,0 @@
"""
Example: Result Extraction from OP2 files using pyNastran
This shows how to extract optimization metrics from Nastran OP2 files.
Common metrics:
- Max displacement (for stiffness constraints)
- Max von Mises stress (for strength constraints)
- Mass (for minimization objectives)
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
def extract_max_displacement(op2_path: Path) -> Dict[str, Any]:
    """
    Find the node with the largest displacement magnitude in an OP2 file.

    Args:
        op2_path: Path to .op2 file

    Returns:
        Dictionary with max displacement, node ID, and components
    """
    from pyNastran.op2.op2 import OP2

    op2 = OP2()
    op2.read_op2(str(op2_path))

    # Static analyses normally carry a single subcase; take the first.
    subcase_id = next(iter(op2.displacements))
    displacements = op2.displacements[subcase_id]

    node_ids = displacements.node_gridtype[:, 0].astype(int)
    step = displacements.data[0]  # first (and usually only) timestep

    components = step[:, :3]  # dx, dy, dz
    # Magnitude: sqrt(dx^2 + dy^2 + dz^2) per node.
    magnitudes = np.sqrt((components ** 2).sum(axis=1))
    winner = int(np.argmax(magnitudes))

    return {
        'max_displacement': float(magnitudes[winner]),
        'max_node_id': int(node_ids[winner]),
        'dx': float(components[winner, 0]),
        'dy': float(components[winner, 1]),
        'dz': float(components[winner, 2]),
        'units': 'mm',  # NX typically uses mm
        'subcase': subcase_id
    }
def extract_max_stress(op2_path: Path, stress_type: str = 'von_mises') -> Dict[str, Any]:
    """
    Extract maximum stress from OP2 file.

    Scans every known element stress table and returns the single largest
    value found across all of them.

    Args:
        op2_path: Path to .op2 file
        stress_type: 'von_mises' or 'max_principal'

    Returns:
        Dictionary with max stress, element ID, and location.
        NOTE: if no stress table is present, 'max_stress' is 0.0 and
        'element_id'/'element_type' are None (no exception is raised).
    """
    from pyNastran.op2.op2 import OP2
    op2 = OP2(debug=False)
    op2.read_op2(str(op2_path))
    # Stress can be in different tables depending on element type
    # Common: cquad4_stress, ctria3_stress, ctetra_stress, etc.
    stress_tables = [
        'cquad4_stress',
        'ctria3_stress',
        'ctetra_stress',
        'chexa_stress',
        'cpenta_stress',
        'cbar_stress',
        'cbeam_stress'
    ]
    max_stress_overall = 0.0
    max_element_id = None
    max_element_type = None
    # Try to get stress from different pyNastran API formats
    for table_name in stress_tables:
        stress_table = None
        # Try format 1: Attribute name with dot (e.g., 'stress.chexa_stress')
        # This is used in newer pyNastran versions
        dotted_name = f'stress.{table_name}'
        if hasattr(op2, dotted_name):
            stress_table = getattr(op2, dotted_name)
        # Try format 2: Nested attribute op2.stress.chexa_stress
        elif hasattr(op2, 'stress') and hasattr(op2.stress, table_name):
            stress_table = getattr(op2.stress, table_name)
        # Try format 3: Direct attribute op2.chexa_stress (older pyNastran)
        elif hasattr(op2, table_name):
            stress_table = getattr(op2, table_name)
        if stress_table:
            # First subcase only -- assumes a single-subcase static run.
            subcase_id = list(stress_table.keys())[0]
            stress_data = stress_table[subcase_id]
            # Extract von Mises stress
            # Note: Structure varies by element type
            element_ids = stress_data.element_node[:, 0].astype(int)
            if stress_type == 'von_mises':
                # For solid elements (CHEXA, CTETRA, CPENTA): von Mises is at index 9
                # For shell elements (CQUAD4, CTRIA3): von Mises is last column (-1)
                if table_name in ['chexa_stress', 'ctetra_stress', 'cpenta_stress']:
                    # Solid elements: data shape is [itime, nnodes, 10]
                    # Index 9 is von_mises [oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3, von_mises]
                    stresses = stress_data.data[0, :, 9]
                else:
                    # Shell elements: von Mises is last column
                    stresses = stress_data.data[0, :, -1]
            else:
                # Max principal stress
                if table_name in ['chexa_stress', 'ctetra_stress', 'cpenta_stress']:
                    stresses = stress_data.data[0, :, 6]  # o1 (max principal)
                else:
                    stresses = stress_data.data[0, :, -2]
            # Keep the running maximum across all element tables.
            max_stress_in_table = np.max(stresses)
            if max_stress_in_table > max_stress_overall:
                max_stress_overall = max_stress_in_table
                max_idx = np.argmax(stresses)
                max_element_id = element_ids[max_idx]
                max_element_type = table_name.replace('_stress', '')
    # CRITICAL: NX Nastran outputs stress in kPa (mN/mm²), convert to MPa
    # 1 kPa = 0.001 MPa
    # NOTE(review): this scale factor assumes the NX unit system described
    # above -- confirm against the solver deck before trusting magnitudes.
    max_stress_overall_mpa = max_stress_overall / 1000.0
    return {
        'max_stress': float(max_stress_overall_mpa),
        # NOTE(review): the truthiness test maps an element ID of 0 to None;
        # assumes IDs are nonzero -- confirm for this model's numbering.
        'element_id': int(max_element_id) if max_element_id else None,
        'stress_type': stress_type,
        'element_type': max_element_type,
        'units': 'MPa',
    }
def extract_mass(op2_path: Path) -> Dict[str, Any]:
    """
    Extract total mass from OP2 file.

    Relies on the grid point weight generator output being present in the
    OP2 (the fallback message says PARAM,GRDPNT,0 must be in the deck).

    Args:
        op2_path: Path to .op2 file

    Returns:
        Dictionary with mass and center of gravity; when the data is
        absent, returns {'total_mass': None, 'note': ...} instead.
    """
    from pyNastran.op2.op2 import OP2
    op2 = OP2()
    op2.read_op2(str(op2_path))
    # Mass is in grid_point_weight table
    if hasattr(op2, 'grid_point_weight') and op2.grid_point_weight:
        mass_data = op2.grid_point_weight
        # Total mass
        # NOTE(review): assumes grid_point_weight exposes .mass/.cg arrays
        # directly -- in some pyNastran versions this attribute is a dict
        # keyed by superelement; confirm against the installed version.
        total_mass = mass_data.mass.sum()
        # Center of gravity
        cg = mass_data.cg
        return {
            'total_mass': float(total_mass),
            'cg_x': float(cg[0]),
            'cg_y': float(cg[1]),
            'cg_z': float(cg[2]),
            'units': 'kg'
        }
    else:
        # Fallback: Mass not directly available
        return {
            'total_mass': None,
            'note': 'Mass data not found in OP2 file. Ensure PARAM,GRDPNT,0 is in Nastran deck'
        }
# Combined extraction function for optimization
def extract_all_results(op2_path: Path) -> Dict[str, Any]:
    """
    Extract all common optimization metrics from OP2 file.

    Each metric is extracted independently; a failure in one extractor is
    recorded under its key as {'error': message} instead of aborting.

    Args:
        op2_path: Path to .op2 file

    Returns:
        Dictionary with all results
    """
    # Lambdas defer name resolution until inside the try block, so every
    # failure mode is captured per-metric.
    plan = (
        ('displacement', lambda p: extract_max_displacement(p)),
        ('stress', lambda p: extract_max_stress(p)),
        ('mass', lambda p: extract_mass(p)),
    )
    results: Dict[str, Any] = {
        'op2_file': str(op2_path),
        'status': 'success'
    }
    for key, run in plan:
        try:
            results[key] = run(op2_path)
        except Exception as e:
            results[key] = {'error': str(e)}
    return results
# Script entry point: dump every metric from one OP2 file plus a summary.
if __name__ == "__main__":
    import sys
    import json

    if len(sys.argv) < 2:
        print("Usage: python op2_extractor_example.py <path_to_op2_file>")
        sys.exit(1)

    op2_path = Path(sys.argv[1])
    if not op2_path.exists():
        print(f"Error: File not found: {op2_path}")
        sys.exit(1)

    print(f"Extracting results from: {op2_path}")
    print("=" * 60)
    results = extract_all_results(op2_path)
    print("\nResults:")
    print(json.dumps(results, indent=2))

    # Summary
    print("\n" + "=" * 60)
    print("SUMMARY:")
    disp = results.get('displacement', {})
    if 'max_displacement' in disp:
        print(f" Max Displacement: {disp['max_displacement']:.6f} {disp['units']} at node {disp['max_node_id']}")
    stress = results.get('stress', {})
    if 'max_stress' in stress:
        print(f" Max {stress['stress_type']}: {stress['max_stress']:.2f} {stress['units']} in element {stress['element_id']}")
    mass = results.get('mass', {})
    if mass.get('total_mass'):
        print(f" Total Mass: {mass['total_mass']:.6f} {mass['units']}")