feat: Add AtomizerField training data export and intelligent model discovery

Major additions:
- Training data export system for AtomizerField neural network training
- Bracket stiffness optimization study with 50+ training samples
- Intelligent NX model discovery (auto-detect solutions, expressions, mesh)
- Result extractors module for displacement, stress, frequency, mass
- User-generated NX journals for advanced workflows
- Archive structure for legacy scripts and test outputs
- Protocol documentation and dashboard launcher

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-11-26 12:01:50 -05:00
parent a0c008a593
commit 2b3573ec42
949 changed files with 1405144 additions and 470 deletions

View File

@@ -0,0 +1 @@
"""Core extractor library for Atomizer."""

View File

@@ -0,0 +1,191 @@
"""
BDF Mass Extractor
==================
Extract mass from Nastran BDF/DAT file using pyNastran.
This is more reliable than OP2 GRDPNT extraction since the BDF always contains
material properties and element geometry.
Usage:
extractor = BDFMassExtractor(bdf_file="model.dat")
mass_kg = extractor.extract_mass()
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
try:
from pyNastran.bdf.bdf import read_bdf
except ImportError:
raise ImportError("pyNastran is required. Install with: pip install pyNastran")
class BDFMassExtractor:
    """
    Extract mass from a Nastran BDF/DAT file.

    Mass is computed by summing per-element masses, which pyNastran derives
    from element geometry (GRID nodes), material density (MAT1, ...) and
    physical properties (PSOLID, PSHELL, ...). This is more reliable than
    OP2 GRDPNT extraction since the BDF always carries materials and geometry.
    """

    def __init__(self, bdf_file: str):
        """
        Args:
            bdf_file: Path to .dat or .bdf file
        """
        self.bdf_file = Path(bdf_file)
        self._bdf_model = None  # cached BDF model, loaded on first use

    def _load_bdf(self):
        """Lazily read and cross-reference the BDF file (cached)."""
        if self._bdf_model is None:
            if not self.bdf_file.exists():
                raise FileNotFoundError(f"BDF file not found: {self.bdf_file}")
            # xref=True cross-references cards so elem.Mass()/Centroid() work
            self._bdf_model = read_bdf(str(self.bdf_file), xref=True, debug=False)
        return self._bdf_model

    def extract_mass(self) -> Dict[str, Any]:
        """
        Calculate total structural mass from the BDF.

        Returns:
            dict with keys:
                'mass_kg'       total mass in kg
                'mass_g'        total mass in grams
                'mass_ton'      raw pyNastran mass value (same number as kg
                                under the NX kg-mm-s convention assumed here)
                'cg'            [x, y, z] mass-weighted center of gravity
                'num_elements'  number of elements that contributed mass
                'units'         unit-system description
                'breakdown'     {element_type: mass} per element type
                'breakdown_kg'  same breakdown, values coerced to float
        """
        bdf = self._load_bdf()
        total_mass = 0.0
        breakdown: Dict[str, float] = {}
        total_elements = 0
        cg_numerator = np.zeros(3)  # sum of mass * centroid for CG

        for eid, elem in bdf.elements.items():
            # Some cards (rigid elements, SPCs, ...) have no Mass() method;
            # skip anything that cannot report a mass.
            # (Bug fix: was a broad `except Exception as e` with unused `e`.)
            try:
                elem_mass = elem.Mass()
            except Exception:
                continue
            total_mass += elem_mass
            breakdown[elem.type] = breakdown.get(elem.type, 0.0) + elem_mass
            # Centroid is optional on some element types.
            # (Bug fix: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.)
            try:
                cg_numerator += elem_mass * np.asarray(elem.Centroid())
            except Exception:
                pass
            total_elements += 1

        # Mass-weighted CG; zero vector when the model carries no mass.
        cg = cg_numerator / total_mass if total_mass > 0 else np.zeros(3)

        # NOTE: NX models here use kg-mm-s, where the Nastran "ton" slot is
        # already kilograms, so no ton->kg conversion is applied.
        mass_kg = float(total_mass)
        return {
            'mass_kg': mass_kg,
            'mass_g': mass_kg * 1000.0,
            'mass_ton': float(total_mass),  # actually kg under NX units
            'cg': cg.tolist(),
            'num_elements': total_elements,
            'units': 'kg-mm-s (NX default)',
            'breakdown': breakdown,
            'breakdown_kg': {k: float(v) for k, v in breakdown.items()},
        }

    def get_material_info(self) -> Dict[str, Any]:
        """
        Collect material properties from the BDF.

        Returns:
            dict mapping material ID -> {'type', 'mid', and, when present on
            the card, 'density', 'density_kg_m3', 'E', 'nu'}.
        """
        bdf = self._load_bdf()
        materials: Dict[Any, Dict[str, Any]] = {}
        for mid, mat in bdf.materials.items():
            mat_info: Dict[str, Any] = {'type': mat.type, 'mid': mid}
            # MAT1 (isotropic) fields; density assumed ton/mm^3 for the
            # kg/m^3 conversion below.
            if getattr(mat, 'rho', None) is not None:
                mat_info['density'] = float(mat.rho)
                mat_info['density_kg_m3'] = float(mat.rho * 1e12)
            if hasattr(mat, 'e'):
                mat_info['E'] = float(mat.e) if mat.e is not None else None
            if hasattr(mat, 'nu'):
                mat_info['nu'] = float(mat.nu) if mat.nu is not None else None
            materials[mid] = mat_info
        return materials
def extract_mass_from_bdf(bdf_file: str) -> float:
    """Return the total structural mass of *bdf_file* in kilograms.

    Thin convenience wrapper around BDFMassExtractor.extract_mass().

    Args:
        bdf_file: Path to .dat or .bdf file

    Returns:
        Mass in kilograms
    """
    return BDFMassExtractor(bdf_file).extract_mass()['mass_kg']
if __name__ == "__main__":
    # Example usage: python bdf_mass_extractor.py model.dat
    import sys

    if len(sys.argv) > 1:
        extractor = BDFMassExtractor(sys.argv[1])

        # Mass summary
        mass_result = extractor.extract_mass()
        print(f"Mass: {mass_result['mass_kg']:.6f} kg ({mass_result['mass_g']:.2f} g)")
        print(f"CG: {mass_result['cg']}")
        print(f"Elements: {mass_result['num_elements']}")
        print(f"Element breakdown: {mass_result['breakdown']}")

        # Material summary
        print(f"\nMaterials:")
        for mid, mat_info in extractor.get_material_info().items():
            print(f" MAT{mid}: {mat_info}")

View File

@@ -0,0 +1,38 @@
{
"381739e9cada3a48": {
"name": "extract_displacement",
"filename": "extract_displacement.py",
"action": "extract_displacement",
"domain": "result_extraction",
"description": "Extract maximum displacement from structural analysis",
"params": {
"result_type": "displacement",
"metric": "maximum"
},
"signature": "381739e9cada3a48"
},
"63d54f297f2403e4": {
"name": "extract_von_mises_stress",
"filename": "extract_von_mises_stress.py",
"action": "extract_von_mises_stress",
"domain": "result_extraction",
"description": "Extract maximum von Mises stress from structural analysis",
"params": {
"result_type": "von_mises_stress",
"metric": "maximum"
},
"signature": "63d54f297f2403e4"
},
"2f58f241a96afb1f": {
"name": "extract_mass",
"filename": "extract_mass.py",
"action": "extract_mass",
"domain": "result_extraction",
"description": "Extract total structural mass",
"params": {
"result_type": "mass",
"metric": "total"
},
"signature": "2f58f241a96afb1f"
}
}

View File

@@ -0,0 +1,64 @@
"""
Extract maximum displacement from structural analysis
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: displacement
Element Type: General
Result Type: displacement
API: model.displacements[subcase]
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_displacement(op2_file: Path, subcase: int = 1):
    """Extract translational displacement results from an OP2 file.

    Args:
        op2_file: Path to the Nastran .op2 results file.
        subcase: Requested subcase ID (currently informational; the first
            available subcase key is used because NX Nastran often stores
            results under an empty-string key).

    Returns:
        dict with max total displacement, the node where it occurs, and
        per-axis maximum absolute components.

    Raises:
        ValueError: if the OP2 contains no displacement results.
    """
    # (Cleanup: removed function-local re-imports of OP2/numpy that
    # shadowed the module-level imports.)
    model = OP2()
    model.read_op2(str(op2_file))

    # NX Nastran often uses the empty string '' as the subcase key.
    available_subcases = list(model.displacements.keys())
    if not available_subcases:
        raise ValueError("No displacement data found in OP2 file")
    disp = model.displacements[available_subcases[0]]

    itime = 0  # static case: single time step
    txyz = disp.data[itime, :, :3]  # translation columns [tx, ty, tz]
    total_disp = np.linalg.norm(txyz, axis=1)
    max_disp = np.max(total_disp)

    # Map the argmax row back to its grid-point ID.
    node_ids = [nid for (nid, grid_type) in disp.node_gridtype]
    max_disp_node = node_ids[np.argmax(total_disp)]

    return {
        'max_displacement': float(max_disp),
        'max_disp_node': int(max_disp_node),
        'max_disp_x': float(np.max(np.abs(txyz[:, 0]))),
        'max_disp_y': float(np.max(np.abs(txyz[:, 1]))),
        'max_disp_z': float(np.max(np.abs(txyz[:, 2])))
    }
if __name__ == '__main__':
    # Example usage: python extract_displacement.py model.op2
    import sys

    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = extract_displacement(op2_file)
        print(f"Extraction result: {result}")
    else:
        # Bug fix: the usage string was missing its f-prefix, so the
        # literal text "{sys.argv[0]}" was printed instead of the script name.
        print(f"Usage: python {sys.argv[0]} <op2_file>")

View File

@@ -0,0 +1,100 @@
"""
Extract natural frequencies from modal analysis
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: eigenvalue_extraction
Element Type: General
Result Type: eigenvalues
API: model.eigenvalues[subcase]
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_frequency(op2_file: Path, subcase: int = 1, mode_number: int = 1):
    """
    Extract natural frequency results from a modal-analysis OP2 file.

    Args:
        op2_file: Path to OP2 file.
        subcase: Subcase ID for modal analysis (informational; the first
            available eigenvalue key is used since NX Nastran commonly
            stores results under an empty-string key).
        mode_number: Which mode to extract (1-indexed; 1 = fundamental).

    Returns:
        dict with 'frequency' (Hz), 'mode_number', 'eigenvalue',
        'all_frequencies' and 'n_modes'.

    Raises:
        ValueError: if no eigenvalue data exists, the frequency attribute
            cannot be located, or mode_number exceeds the available modes.
    """
    # (Cleanup: removed function-local re-imports of OP2/numpy that
    # shadowed the module-level imports.)
    model = OP2()
    model.read_op2(str(op2_file))

    # NX Nastran often uses the empty string '' as the subcase key.
    available_subcases = list(model.eigenvalues.keys())
    if not available_subcases:
        raise ValueError(f"No eigenvalue data found in OP2 file: {op2_file}")
    eigenvalues = model.eigenvalues[available_subcases[0]]

    # Mode number is 1-indexed, the arrays are 0-indexed.
    mode_idx = mode_number - 1

    # The RealEigenvalues object exposes frequencies under different
    # attribute names depending on pyNastran version; probe in order.
    if hasattr(eigenvalues, 'cycles'):
        frequencies = eigenvalues.cycles
    elif hasattr(eigenvalues, 'frequencies'):
        frequencies = eigenvalues.frequencies
    elif hasattr(eigenvalues, 'eigenvalues'):
        # lambda = (2*pi*f)^2  =>  f = sqrt(lambda) / (2*pi)
        frequencies = np.sqrt(np.abs(eigenvalues.eigenvalues)) / (2.0 * np.pi)
    else:
        raise ValueError(f"Cannot find frequency data in eigenvalues object. Available attributes: {dir(eigenvalues)}")

    if mode_idx >= len(frequencies):
        raise ValueError(f"Mode {mode_number} not found. Only {len(frequencies)} modes available.")
    frequency = frequencies[mode_idx]  # Hz

    # Recover the raw eigenvalue when stored; otherwise derive it back
    # from the frequency.
    if hasattr(eigenvalues, 'eigenvalues'):
        eigenvalue = eigenvalues.eigenvalues[mode_idx]
    elif hasattr(eigenvalues, 'eigrs'):
        eigenvalue = eigenvalues.eigrs[mode_idx]
    else:
        eigenvalue = (2.0 * np.pi * frequency) ** 2

    all_frequencies = list(frequencies)
    return {
        'frequency': float(frequency),
        'mode_number': int(mode_number),
        'eigenvalue': float(eigenvalue),
        'all_frequencies': all_frequencies,
        'n_modes': len(all_frequencies)
    }
if __name__ == '__main__':
    # Example usage: python extract_frequency.py model.op2 [mode_number]
    import sys

    if len(sys.argv) > 1:
        mode_num = int(sys.argv[2]) if len(sys.argv) > 2 else 1
        result = extract_frequency(Path(sys.argv[1]), mode_number=mode_num)
        print(f"Extraction result: {result}")
    else:
        print(f"Usage: python {sys.argv[0]} <op2_file> [mode_number]")

View File

@@ -0,0 +1,39 @@
"""
Extract total structural mass
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: generic_extraction
Element Type: General
Result Type: unknown
API: model.<result_type>[subcase]
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_generic(op2_file: Path):
    """Generic OP2 extraction stub - customize before use.

    Reads the OP2 so a developer can inspect what the file contains;
    use model.get_op2_stats() to list available results
    (e.g. model.displacements, model.ctetra_stress, ...).

    Args:
        op2_file: Path to the Nastran .op2 results file.

    Returns:
        dict: {'result': None} placeholder until a real extraction is wired in.
    """
    # (Cleanup: removed function-local re-import of OP2 that shadowed the
    # module-level import.)
    model = OP2()
    model.read_op2(str(op2_file))
    # TODO: Customize extraction based on requirements.
    return {'result': None}
if __name__ == '__main__':
    # Example usage: python extract_mass.py model.op2
    import sys

    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = extract_generic(op2_file)
        print(f"Extraction result: {result}")
    else:
        # Bug fix: the usage string was missing its f-prefix, so the
        # literal text "{sys.argv[0]}" was printed instead of the script name.
        print(f"Usage: python {sys.argv[0]} <op2_file>")

View File

@@ -0,0 +1,62 @@
"""
Extract mass from Nastran BDF/DAT file as fallback when OP2 doesn't have GRDPNT
"""
from pathlib import Path
from typing import Dict, Any
import re
def extract_mass_from_bdf(bdf_file: Path) -> Dict[str, Any]:
    """
    Extract mass from a Nastran BDF file by parsing material and element
    definitions with pyNastran.

    Fallback path for when the OP2 lacks PARAM,GRDPNT output.

    Args:
        bdf_file: Path to .dat or .bdf file

    Returns:
        dict: {
            'mass_kg': total mass in kg,
            'mass_g': total mass in grams,
            'mass_ton': raw mass value reported by pyNastran,
            'method': 'bdf_calculation',
            'units': unit-system note
        }

    Raises:
        FileNotFoundError: if the BDF file does not exist.
    """
    bdf_file = Path(bdf_file)
    if not bdf_file.exists():
        raise FileNotFoundError(f"BDF file not found: {bdf_file}")
    # Parse using pyNastran BDF reader; xref=True so mass properties can be
    # computed from cross-referenced cards. mode='msc' - NOTE(review): the
    # sibling extractors target NX Nastran; confirm MSC card interpretation
    # is intended here.
    from pyNastran.bdf.bdf import read_bdf
    model = read_bdf(str(bdf_file), validate=False, xref=True, punch=False,
                     encoding='utf-8', log=None, debug=False, mode='msc')
    # model.mass_properties() returns (mass, cg, inertia).
    # NOTE(review): mass_properties() was removed from the BDF class in
    # newer pyNastran releases (moved to bdf.mesh_utils) - confirm the
    # pinned pyNastran version still exposes this method.
    mass_properties = model.mass_properties()
    mass_ton = mass_properties[0]  # Mass in tons (ton-mm-sec)
    # NX Nastran typically uses ton-mm-sec units.
    # NOTE(review): bdf_mass_extractor.py treats the same pyNastran value
    # as already kilograms (kg-mm-s); the two extractors disagree by a
    # factor of 1000 - confirm the model's unit system before trusting
    # either conversion.
    mass_kg = mass_ton * 1000.0  # Convert tons to kg
    mass_g = mass_kg * 1000.0  # Convert kg to grams
    return {
        'mass_kg': mass_kg,
        'mass_g': mass_g,
        'mass_ton': mass_ton,
        'method': 'bdf_calculation',
        'units': 'ton-mm-sec (converted to kg/g)'
    }
if __name__ == '__main__':
    # Example usage: python extract_mass_bdf.py model.dat
    import sys

    if len(sys.argv) <= 1:
        print(f"Usage: python {sys.argv[0]} <bdf_file>")
    else:
        result = extract_mass_from_bdf(Path(sys.argv[1]))
        print(f"Mass from BDF: {result['mass_kg']:.6f} kg ({result['mass_g']:.3f} g)")

View File

@@ -0,0 +1,71 @@
"""
Extract mass from NX measure expression
This extractor reads mass from a temp file written by solve_simulation.py journal.
The journal extracts the mass from expression p173 and writes it to _temp_mass.txt
"""
from pathlib import Path
from typing import Dict, Any
import sys
def extract_mass_from_expression(prt_file: Path, expression_name: str = "p173") -> float:
    """
    Extract mass from an NX expression by reading the temp file written by
    the solve_simulation.py journal.

    The journal extracts the expression value (e.g. p173) and writes it to
    _temp_mass.txt next to the part file; this function only reads that file.

    Args:
        prt_file: Path to .prt file with the mass expression.
        expression_name: Name of the expression containing mass
            (default "p173"); used only in the log message - the value
            itself always comes from _temp_mass.txt.

    Returns:
        Mass in kilograms.

    Raises:
        FileNotFoundError: if the part file or _temp_mass.txt is missing.
        ValueError: if the temp file contents cannot be parsed as a float.
        RuntimeError: for any other failure while reading the temp file.
    """
    prt_file = Path(prt_file)
    if not prt_file.exists():
        raise FileNotFoundError(f"Part file not found: {prt_file}")

    # The journal writes the mass to _temp_mass.txt beside the .prt file.
    mass_file = prt_file.parent / "_temp_mass.txt"
    if not mass_file.exists():
        raise FileNotFoundError(
            f"Mass temp file not found: {mass_file}\n"
            f"The solve_simulation.py journal should have created this file."
        )

    try:
        with open(mass_file, 'r') as f:
            mass_kg = float(f.read().strip())
        print(f"[OK] Mass from {expression_name}: {mass_kg:.6f} kg ({mass_kg * 1000:.2f} g)")
        return mass_kg
    except ValueError as e:
        # Bug fix: chain the original exception (`from e`) so the real
        # parse failure stays visible in the traceback.
        raise ValueError(f"Could not parse mass from {mass_file}: {e}") from e
    except Exception as e:
        raise RuntimeError(f"Failed to read mass file: {e}") from e
if __name__ == "__main__":
    # Guard clause: bail out early when no part file was given.
    if len(sys.argv) <= 1:
        print(f"Usage: python {sys.argv[0]} <prt_file> [expression_name]")
        sys.exit(1)

    prt_file = Path(sys.argv[1])
    expression_name = sys.argv[2] if len(sys.argv) > 2 else "p173"
    try:
        mass_kg = extract_mass_from_expression(prt_file, expression_name)
        print(f"\nMass: {mass_kg:.6f} kg ({mass_kg * 1000:.2f} g)")
    except Exception as e:
        print(f"\nERROR: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

View File

@@ -0,0 +1,105 @@
"""
Extract maximum von Mises stress from structural analysis
Auto-generated by Atomizer Phase 3 - pyNastran Research Agent
Pattern: solid_stress
Element Type: CTETRA
Result Type: stress
API: model.ctetra_stress[subcase] or model.chexa_stress[subcase]
"""
from pathlib import Path
from typing import Dict, Any
import numpy as np
from pyNastran.op2.op2 import OP2
def extract_solid_stress(op2_file: Path, subcase: int = 1, element_type: str = 'ctetra'):
    """Extract the maximum von Mises stress from solid/shell element results.

    Args:
        op2_file: Path to the Nastran .op2 results file.
        subcase: Preferred subcase ID; falls back to the first available
            subcase when not present (NX Nastran often keys results by '').
        element_type: Element family: 'ctetra', 'chexa', 'cquad4' or 'ctria3'.

    Returns:
        dict with 'max_von_mises' and 'max_stress_element'.

    Raises:
        ValueError: for an unknown element type, missing stress results,
            or von Mises data not present in the file.
    """
    # (Cleanup: removed function-local re-imports of OP2/numpy that
    # shadowed the module-level imports.)
    model = OP2()
    model.read_op2(str(op2_file))

    # Map the element family to its pyNastran stress attribute name.
    stress_attr_map = {
        'ctetra': 'ctetra_stress',
        'chexa': 'chexa_stress',
        'cquad4': 'cquad4_stress',
        'ctria3': 'ctria3_stress'
    }
    stress_attr = stress_attr_map.get(element_type.lower())
    if not stress_attr:
        raise ValueError(f"Unknown element type: {element_type}")

    # pyNastran stores stress under model.op2_results.stress.<attr>[subcase].
    stress_dict = None
    if hasattr(model, 'op2_results') and hasattr(model.op2_results, 'stress'):
        stress_container = model.op2_results.stress
        if hasattr(stress_container, stress_attr):
            stress_dict = getattr(stress_container, stress_attr)
    if stress_dict is None:
        raise ValueError(f"No {element_type} stress results in OP2. Available attributes: {[a for a in dir(model) if 'stress' in a.lower()]}")

    # stress_dict maps subcase ID -> stress table.
    available_subcases = list(stress_dict.keys())
    if not available_subcases:
        raise ValueError("No stress data found in OP2 file")
    # Prefer the requested subcase, otherwise take the first available.
    actual_subcase = subcase if subcase in available_subcases else available_subcases[0]
    stress = stress_dict[actual_subcase]
    itime = 0  # static case

    # is_von_mises is a property, not a method. Guard clause keeps the
    # main extraction un-nested.
    if not stress.is_von_mises:
        raise ValueError("von Mises stress not available")

    # von Mises lives at a type-dependent column index:
    #   shell results (CQUAD4/CTRIA3): 8 columns, von Mises last (index 7)
    #   solid results (CTETRA/CHEXA): >=10 columns, von Mises at index 9
    ncols = stress.data.shape[2]
    if ncols == 8:
        von_mises_col = 7
    elif ncols >= 10:
        von_mises_col = 9
    else:
        von_mises_col = ncols - 1  # unknown layout: try last column

    von_mises = stress.data[itime, :, von_mises_col]
    max_stress = float(np.max(von_mises))
    # Map the argmax row back to its element ID.
    element_ids = [eid for (eid, node) in stress.element_node]
    max_stress_elem = element_ids[np.argmax(von_mises)]
    return {
        'max_von_mises': max_stress,
        'max_stress_element': int(max_stress_elem)
    }
if __name__ == '__main__':
    # Example usage: python extract_von_mises_stress.py model.op2
    import sys

    if len(sys.argv) > 1:
        op2_file = Path(sys.argv[1])
        result = extract_solid_stress(op2_file)
        print(f"Extraction result: {result}")
    else:
        # Bug fix: the usage string was missing its f-prefix, so the
        # literal text "{sys.argv[0]}" was printed instead of the script name.
        print(f"Usage: python {sys.argv[0]} <op2_file>")

View File

@@ -0,0 +1,208 @@
"""
Generic Field Data Extractor
=============================
Reusable extractor for parsing NX exported field data (.fld files).
Works for any result type: displacement, stress, strain, temperature, etc.
Supports TWO formats:
1. NX native .fld format (exported from ResultProbe)
2. CSV format with headers
Usage:
extractor = FieldDataExtractor(field_file="export_field_dz.fld")
results = extractor.extract()
max_value = results['max_abs_value']
"""
import csv
from pathlib import Path
from typing import Dict, Any, Optional, List
import numpy as np
class FieldDataExtractor:
    """
    Generic extractor for NX exported field data files (.fld or .csv).

    Works for any scalar field: displacement components, stress, strain,
    temperature, etc. The format is auto-detected: files whose first line
    starts with 'FIELD:' are parsed as NX native field exports, anything
    else as CSV with a header row.
    """

    def __init__(
        self,
        field_file: str,
        result_column: str = "x(mm)",
        aggregation: str = "max_abs",
    ):
        """
        Args:
            field_file: Path to .fld or .csv file.
            result_column: Column holding the values (CSV only; ignored for .fld).
            aggregation: Aggregation method: max_abs, max, min, mean or std.
                Unknown values silently fall back to max_abs.
        """
        self.field_file = Path(field_file)
        self.result_column = result_column
        self.aggregation = aggregation

    def _is_nx_field_format(self) -> bool:
        """Return True when the file is an NX .fld export (vs CSV)."""
        with open(self.field_file, 'r') as f:
            return f.readline().startswith('FIELD:')

    def _parse_nx_field_file(self) -> List[float]:
        """
        Parse the NX native field export format (.fld).

        Layout:
            FIELD: [ResultProbe] : [TABLE]
            ...metadata...
            START DATA
            step, node_id, value
            0, 396, -0.086716040968895
            ...
            END DATA
        """
        values: List[float] = []
        in_data_section = False
        with open(self.field_file, 'r') as f:
            for line in f:
                line = line.strip()
                if line.startswith('START DATA'):
                    in_data_section = True
                    continue
                if line.startswith('END DATA'):
                    break
                if in_data_section and line:
                    # Data rows are "step, node_id, value".
                    parts = [p.strip() for p in line.split(',')]
                    if len(parts) >= 3:
                        try:
                            values.append(float(parts[2]))  # third column = value
                        except ValueError:
                            continue  # skip header/malformed rows
        return values

    def _parse_csv_file(self) -> List[float]:
        """
        Parse a CSV file with a header row, reading self.result_column.

        Expected layout:
            node_id,x(mm),y(mm),z(mm)
            1,0.0,0.0,0.5
            ...
        """
        values: List[float] = []
        with open(self.field_file, 'r') as f:
            for row in csv.DictReader(f):
                try:
                    values.append(float(row[self.result_column]))
                except (KeyError, TypeError, ValueError):
                    # Robustness fix: short rows yield None for missing
                    # columns (TypeError on float(None)); skip those too.
                    continue
        return values

    def extract(self) -> Dict[str, Any]:
        """
        Extract and aggregate the field data.

        Returns:
            dict: {
                'value': aggregated value,
                'all_values': list of all parsed values,
                'node_count': number of values,
                'stats': {min, max, mean, std, max_abs},
                'aggregation_method': the requested aggregation
            }

        Raises:
            FileNotFoundError: if the field file does not exist.
            ValueError: if no valid data rows were found.
        """
        if not self.field_file.exists():
            raise FileNotFoundError(f"Field file not found: {self.field_file}")

        # Detect format and parse accordingly.
        if self._is_nx_field_format():
            values = self._parse_nx_field_file()
        else:
            values = self._parse_csv_file()
        if not values:
            raise ValueError(f"No valid data found in field file: {self.field_file}")

        values_array = np.array(values)
        stats = {
            'min': float(np.min(values_array)),
            'max': float(np.max(values_array)),
            'mean': float(np.mean(values_array)),
            'std': float(np.std(values_array)),
            'max_abs': float(np.max(np.abs(values_array))),
        }
        # The stats dict already holds one entry per supported aggregation;
        # unknown methods fall back to max_abs (the most conservative).
        aggregated_value = stats.get(self.aggregation, stats['max_abs'])
        return {
            'value': aggregated_value,
            'all_values': values,
            'node_count': len(values),
            'stats': stats,
            'aggregation_method': self.aggregation,
        }
def extract_displacement_field(
    field_file: str,
    component: str = "z",
    aggregation: str = "max_abs"
) -> Dict[str, Any]:
    """Extract displacement data for one axis from a field file.

    Args:
        field_file: Path to displacement field file
        component: Displacement component: 'x', 'y' or 'z'
        aggregation: Aggregation method passed through to FieldDataExtractor

    Returns:
        The FieldDataExtractor.extract() result for the chosen column.
    """
    # Unknown components fall back to the x column, as before.
    column = {'x': 'x(mm)', 'y': 'y(mm)', 'z': 'z(mm)'}.get(component.lower(), 'x(mm)')
    return FieldDataExtractor(
        field_file, result_column=column, aggregation=aggregation
    ).extract()
if __name__ == "__main__":
    # Example usage: python field_data_extractor.py export_field_dz.fld
    import sys

    if len(sys.argv) > 1:
        results = extract_displacement_field(sys.argv[1], component="z")
        print(f"Max absolute Z-displacement: {results['value']:.6f} mm")
        print(f"Statistics: {results['stats']}")

View File

@@ -0,0 +1,242 @@
"""
Generic OP2 Extractor
====================
Reusable extractor for NX Nastran OP2 files using pyNastran.
Extracts mass properties, forces, displacements, stresses, etc.
Usage:
extractor = OP2Extractor(op2_file="model.op2")
mass = extractor.extract_mass()
forces = extractor.extract_grid_point_forces()
"""
from pathlib import Path
from typing import Dict, Any, Optional, List
import numpy as np
try:
from pyNastran.op2.op2 import read_op2
except ImportError:
raise ImportError("pyNastran is required. Install with: pip install pyNastran")
class OP2Extractor:
    """
    Generic extractor for Nastran OP2 files.

    Supports:
    - Mass properties (via grid point weight, requires PARAM,GRDPNT)
    - Grid point forces
    - Applied loads (load vectors, with a grid-point-force fallback)
    """

    def __init__(self, op2_file: str):
        """
        Args:
            op2_file: Path to .op2 file
        """
        self.op2_file = Path(op2_file)
        self._op2_model = None  # cached OP2 model, loaded lazily

    def _load_op2(self):
        """Lazy load the OP2 file (cached after the first read)."""
        if self._op2_model is None:
            if not self.op2_file.exists():
                raise FileNotFoundError(f"OP2 file not found: {self.op2_file}")
            self._op2_model = read_op2(str(self.op2_file), debug=False)
        return self._op2_model

    def extract_mass(self, subcase_id: Optional[int] = None) -> Dict[str, Any]:
        """
        Extract mass properties from the OP2 grid point weight table.

        Args:
            subcase_id: Currently unused (mass is model-wide).

        Returns:
            dict: {
                'mass_kg': total mass in kg,
                'mass_g': total mass in grams,
                'mass_ton': raw mass value from the OP2,
                'cg': [x, y, z] center of gravity,
                'reference_point': grid point weight reference point,
                'units': unit-system note
            }

        Raises:
            ValueError: if the OP2 carries no grid point weight data or the
                MO matrix is absent.
        """
        op2 = self._load_op2()
        # Grid point weight holds the mass summary; it only exists when the
        # deck requested it (PARAM,GRDPNT).
        if not hasattr(op2, 'grid_point_weight') or not op2.grid_point_weight:
            raise ValueError("No mass properties found in OP2 file")
        gpw = op2.grid_point_weight
        # MO is the rigid-body mass matrix; [0, 0] is the translational
        # mass term at the reference point.
        mass_matrix = gpw.MO[0, 0] if hasattr(gpw, 'MO') else None
        # Reference point defaults to 0 (basic origin) when absent/falsy.
        if hasattr(gpw, 'reference_point') and gpw.reference_point:
            ref_point = gpw.reference_point
        else:
            ref_point = 0
        # NOTE(review): this assumes ton-mm-sec and multiplies by 1000, but
        # the sibling BDF extractor treats NX masses as already-kilograms
        # (kg-mm-s) - the two disagree by 1000x. Confirm the model's unit
        # system before trusting this conversion. TODO
        if mass_matrix is not None:
            mass_ton = mass_matrix
            mass_kg = mass_ton * 1000.0  # 1 ton = 1000 kg
        else:
            raise ValueError("Could not extract mass from OP2")
        # CG may be absent on some OP2s; default to the origin.
        cg = [0.0, 0.0, 0.0]
        if hasattr(gpw, 'cg'):
            cg = gpw.cg.tolist() if hasattr(gpw.cg, 'tolist') else list(gpw.cg)
        return {
            'mass_kg': mass_kg,
            'mass_g': mass_kg * 1000.0,
            'mass_ton': mass_ton,
            'cg': cg,
            'reference_point': ref_point,
            'units': 'ton-mm-sec (converted to kg)',
        }

    def extract_grid_point_forces(
        self,
        subcase_id: Optional[int] = None,
        component: str = "total"  # total, fx, fy, fz, mx, my, mz
    ) -> Dict[str, Any]:
        """
        Extract grid point forces from the OP2.

        Args:
            subcase_id: Subcase ID (if None, uses the first available)
            component: Force component: 'total' (resultant), 'fx', 'fy'
                or 'fz'. (mx/my/mz are listed above but not handled below.)

        Returns:
            dict: {
                'force': max absolute force value,
                'all_forces': per-grid-point values,
                'max_force'/'min_force': extrema,
                'total_force': sum of all values,
                'component', 'subcase_id': echoes of the inputs
            }

        Raises:
            ValueError: if no grid point forces exist or the component
                is not recognized.
        """
        op2 = self._load_op2()
        if not hasattr(op2, 'grid_point_forces') or not op2.grid_point_forces:
            raise ValueError("No grid point forces found in OP2 file")
        # Get first subcase if not specified
        if subcase_id is None:
            subcase_id = list(op2.grid_point_forces.keys())[0]
        gpf = op2.grid_point_forces[subcase_id]
        # Grid point force columns are assumed fx, fy, fz, mx, my, mz.
        # NOTE(review): gpf.data is indexed as 2-D here, but pyNastran
        # result tables are often (itime, nrows, ncols) 3-D - confirm
        # against the installed pyNastran version. TODO
        if component == "total":
            # Resultant force: sqrt(fx^2 + fy^2 + fz^2)
            forces = np.sqrt(gpf.data[:, 0]**2 + gpf.data[:, 1]**2 + gpf.data[:, 2]**2)
        elif component == "fx":
            forces = gpf.data[:, 0]
        elif component == "fy":
            forces = gpf.data[:, 1]
        elif component == "fz":
            forces = gpf.data[:, 2]
        else:
            raise ValueError(f"Unknown component: {component}")
        return {
            'force': float(np.max(np.abs(forces))),
            'all_forces': forces.tolist(),
            'max_force': float(np.max(forces)),
            'min_force': float(np.min(forces)),
            'total_force': float(np.sum(forces)),
            'component': component,
            'subcase_id': subcase_id,
        }

    def extract_applied_loads(self, subcase_id: Optional[int] = None) -> Dict[str, Any]:
        """
        Extract applied loads from the OP2 file.

        Attempts to read load vectors; not all OP2 files contain them.
        When absent, falls back to grid point forces as an approximation
        (that call's dict shape differs from the load-vector dict below).

        Args:
            subcase_id: Subcase ID (if None, uses the first available)

        Returns:
            dict: Load information
        """
        op2 = self._load_op2()
        # Try to get load vectors
        if hasattr(op2, 'load_vectors') and op2.load_vectors:
            if subcase_id is None:
                subcase_id = list(op2.load_vectors.keys())[0]
            lv = op2.load_vectors[subcase_id]
            loads = lv.data
            return {
                'total_load': float(np.sum(np.abs(loads))),
                'max_load': float(np.max(np.abs(loads))),
                'load_resultant': float(np.linalg.norm(loads)),
                'subcase_id': subcase_id,
            }
        else:
            # Fallback: use grid point forces as approximation
            return self.extract_grid_point_forces(subcase_id)
def extract_mass_from_op2(op2_file: str) -> float:
    """Return the total model mass in kilograms read from *op2_file*.

    Thin convenience wrapper around OP2Extractor.extract_mass().

    Args:
        op2_file: Path to .op2 file

    Returns:
        Mass in kilograms
    """
    return OP2Extractor(op2_file).extract_mass()['mass_kg']
def extract_force_from_op2(
    op2_file: str,
    component: str = "fz"
) -> float:
    """Return the max-absolute grid point force for one component.

    Thin convenience wrapper around OP2Extractor.extract_grid_point_forces().

    Args:
        op2_file: Path to .op2 file
        component: Force component (fx, fy, fz, or total)

    Returns:
        Force value
    """
    reader = OP2Extractor(op2_file)
    return reader.extract_grid_point_forces(component=component)['force']
if __name__ == "__main__":
    # Example usage: python op2_extractor.py model.op2
    import sys

    if len(sys.argv) > 1:
        extractor = OP2Extractor(sys.argv[1])

        # Mass summary
        mass_result = extractor.extract_mass()
        print(f"Mass: {mass_result['mass_kg']:.6f} kg")
        print(f"CG: {mass_result['cg']}")

        # Forces (may be absent from the OP2)
        try:
            force_result = extractor.extract_grid_point_forces(component="fz")
            print(f"Max Fz: {force_result['force']:.2f} N")
        except ValueError as e:
            print(f"Forces not available: {e}")

View File

@@ -0,0 +1,172 @@
"""
Generic Stiffness Calculator
============================
Reusable calculator for structural stiffness from FEA results.
Works with any structure: bracket, beam, plate, etc.
Stiffness (k) = Applied Force (F) / Displacement (δ)
Usage:
calculator = StiffnessCalculator(
field_file="export_field_dz.fld",
op2_file="model.op2",
force_component="fz",
displacement_component="z"
)
results = calculator.calculate()
stiffness = results['stiffness']
"""
from pathlib import Path
from typing import Dict, Any, Optional
from .field_data_extractor import FieldDataExtractor
from .op2_extractor import OP2Extractor
class StiffnessCalculator:
    """
    Generic stiffness calculator for structural analysis.

    Computes k = F / delta from FEA results, so it applies to any structure
    with a single dominant load path: brackets, beams, plates, etc.
    """

    def __init__(
        self,
        field_file: str,
        op2_file: str,
        force_component: str = "fz",
        displacement_component: str = "z",
        displacement_aggregation: str = "max_abs",
        applied_force: Optional[float] = None,
    ):
        """
        Args:
            field_file: Path to displacement field file (.fld or .csv)
            op2_file: Path to OP2 file
            force_component: Force component to extract (fx, fy, fz, total)
            displacement_component: Displacement component (x, y, z)
            displacement_aggregation: Aggregation method (max_abs, max, min)
            applied_force: Known force value; when given, OP2 extraction is skipped
        """
        self.field_file = Path(field_file)
        self.op2_file = Path(op2_file)
        self.force_component = force_component
        self.displacement_component = displacement_component
        self.displacement_aggregation = displacement_aggregation
        self.applied_force = applied_force

    def _peak_displacement(self):
        """Return (aggregated displacement in mm, stats dict) from the field file."""
        reader = FieldDataExtractor(
            field_file=str(self.field_file),
            result_column=f"{self.displacement_component}(mm)",
            aggregation=self.displacement_aggregation
        )
        outcome = reader.extract()
        delta = outcome['value']
        if delta == 0:
            raise ValueError(f"Zero displacement found. Check field file: {self.field_file}")
        return delta, outcome['stats']

    def _resolve_force(self):
        """Return the applied force (N): user-supplied or OP2-extracted."""
        if self.applied_force is not None:
            load = self.applied_force
        else:
            # Note: OP2 forces might be in milli-Newtons depending on the
            # unit system; verify against the f06 header. Newtons assumed.
            reader = OP2Extractor(op2_file=str(self.op2_file))
            load = reader.extract_grid_point_forces(component=self.force_component)['force']
        if load == 0:
            raise ValueError(f"Zero force found. Check OP2 file: {self.op2_file}")
        return load

    def calculate(self) -> Dict[str, Any]:
        """
        Calculate stiffness from FEA results.

        Returns:
            dict with 'stiffness' (N/mm), 'displacement' (mm), 'force' (N),
            'compliance' (mm/N), a 'units' map, the input file paths and
            the displacement statistics.

        Raises:
            ValueError: when the extracted displacement or force is zero.
        """
        delta, delta_stats = self._peak_displacement()
        load = self._resolve_force()

        return {
            'stiffness': load / delta,      # k = F / delta, N/mm
            'displacement': delta,
            'force': load,
            'compliance': delta / load,     # 1/k, mm/N
            'units': {
                'stiffness': 'N/mm',
                'displacement': 'mm',
                'force': 'N (verify from f06)',
                'compliance': 'mm/N'
            },
            'field_file': str(self.field_file),
            'op2_file': str(self.op2_file),
            'displacement_stats': delta_stats,
        }
def calculate_stiffness(
    field_file: str,
    op2_file: str,
    force_component: str = "fz",
    displacement_component: str = "z"
) -> float:
    """Convenience wrapper returning just the stiffness value (N/mm).

    Args:
        field_file: Path to displacement field file
        op2_file: Path to OP2 file
        force_component: Force component (fx, fy, fz, total)
        displacement_component: Displacement component (x, y, z)

    Returns:
        Stiffness value (N/mm)
    """
    return StiffnessCalculator(
        field_file=field_file,
        op2_file=op2_file,
        force_component=force_component,
        displacement_component=displacement_component
    ).calculate()['stiffness']
if __name__ == "__main__":
    # Example usage: python stiffness_calculator.py disp.fld model.op2
    import sys

    if len(sys.argv) > 2:
        results = StiffnessCalculator(
            field_file=sys.argv[1],
            op2_file=sys.argv[2],
            force_component="fz",
            displacement_component="z"
        ).calculate()
        print(f"Stiffness: {results['stiffness']:.2f} N/mm")
        print(f"Displacement: {results['displacement']:.6f} mm")
        print(f"Force: {results['force']:.2f} N")
        print(f"Compliance: {results['compliance']:.6e} mm/N")

View File

@@ -475,6 +475,76 @@ sys.argv = ['', {argv_str}] # Set argv for the main function
return errors[:10] # Limit to first 10 errors
def discover_model(self, sim_file: Path) -> Dict[str, Any]:
"""
Discover model information without solving.
This scans the NX simulation file and reports:
- All solutions (names, types)
- All expressions (potential design variables)
- Mesh info
- Linked geometry parts
Args:
sim_file: Path to .sim file
Returns:
Dictionary with discovered model info
"""
import json
sim_file = Path(sim_file)
if not sim_file.exists():
return {'success': False, 'error': f'Sim file not found: {sim_file}'}
# Use the discover_model journal
discover_journal = Path(__file__).parent.parent / "nx_journals" / "discover_model.py"
if not discover_journal.exists():
return {'success': False, 'error': f'Discovery journal not found: {discover_journal}'}
print(f"\n[NX SOLVER] Discovering model: {sim_file.name}")
print(f" Using journal: {discover_journal.name}")
try:
cmd = [str(self.solver_exe), str(discover_journal), '--', str(sim_file.absolute())]
result = subprocess.run(
cmd,
capture_output=True,
text=True,
timeout=60, # 1 minute timeout for discovery
cwd=str(sim_file.parent)
)
# Print stderr (debug/progress messages)
if result.stderr:
for line in result.stderr.strip().split('\n'):
print(f" {line}")
# Parse stdout as JSON
if result.stdout:
try:
discovery_result = json.loads(result.stdout)
return discovery_result
except json.JSONDecodeError as e:
return {
'success': False,
'error': f'Failed to parse discovery output: {e}',
'raw_output': result.stdout[:1000]
}
else:
return {
'success': False,
'error': 'No output from discovery journal',
'stderr': result.stderr
}
except subprocess.TimeoutExpired:
return {'success': False, 'error': 'Discovery timeout (60s)'}
except Exception as e:
return {'success': False, 'error': str(e)}
def _cleanup_temp_files(self, working_dir: Path, base_name: str):
"""Remove temporary solver files."""
# Files to keep