Files
Atomizer/atomizer-field/Instructions.md
Antoine d5ffba099e feat: Merge Atomizer-Field neural network module into main repository
Permanently integrates the Atomizer-Field GNN surrogate system:
- neural_models/: Graph Neural Network for FEA field prediction
- batch_parser.py: Parse training data from FEA exports
- train.py: Neural network training pipeline
- predict.py: Inference engine for fast predictions

This enables 600x-2200x speedup over traditional FEA by replacing
expensive simulations with millisecond neural network predictions.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-26 15:31:33 -05:00

21 KiB

Neural Field Data Parser: From NX Nastran Files to Training Data Complete Implementation Guide

What You Have vs What You Need What NX Nastran Gives You: Files Available:

.sim      - Simulation file with load/BC definitions
.fem      - Finite element model
.prt      - Part geometry
.bdf/.dat - Nastran input deck (mesh, materials, loads, BCs)
.op2      - Binary results (stress, displacement, strain)
.f06      - ASCII results (human readable)
.log      - Solver log

This is SUFFICIENT! The BDF contains everything about setup, OP2 contains all results.

Step-by-Step Instructions for Manual Data Generation Step 1: Set Up Your Analysis in NX

  1. Create your geometry in NX
  2. Generate mesh (record statistics)
  3. Apply materials
  4. Define boundary conditions:
    • Fixed supports
    • Pinned constraints
    • Contact (if needed)
  5. Apply loads:
    • Forces
    • Pressures
    • Gravity
  6. Set up solution parameters
  7. Run analysis
  8. Ensure these files are generated:
    • model.bdf (or .dat)
    • model.op2
    • model.f06 Step 2: Organize Your Files training_case_001/ ├── input/ │ ├── model.bdf # Main input deck │ ├── model.sim # NX simulation file │ └── geometry.prt # Original geometry ├── output/ │ ├── model.op2 # Binary results │ ├── model.f06 # ASCII results │ └── model.log # Solver log └── metadata.json # Your manual annotations

Python Parser Implementation — Main Parser Script

"""
neural_field_parser.py

Parses NX Nastran files into Neural Field training data
"""

import json
import numpy as np
import h5py
from pathlib import Path
from datetime import datetime
import hashlib

# pyNastran imports

from pyNastran.bdf.bdf import BDF
from pyNastran.op2.op2 import OP2

class NastranToNeuralFieldParser:
    """Parses Nastran BDF/OP2 files into a Neural Field data structure."""

def __init__(self, case_directory):
    """Set up file paths, Nastran readers, and an empty output record.

    Args:
        case_directory: Root folder containing input/ and output/ subdirs.
    """
    self.case_dir = Path(case_directory)
    self.bdf_file = self.case_dir / "input" / "model.bdf"
    self.op2_file = self.case_dir / "output" / "model.op2"

    # Readers are created here but files are only parsed in parse_all().
    self.bdf = BDF(debug=False)
    self.op2 = OP2(debug=False)

    # Skeleton of the neural-field record; each extract_* method fills
    # in exactly one of these sections.
    sections = (
        "metadata", "geometry", "mesh", "materials",
        "boundary_conditions", "loads", "results",
    )
    self.neural_field_data = {name: {} for name in sections}
    
def parse_all(self):
    """Read the BDF/OP2 pair, populate every section, persist, and return the record."""
    print("Starting parse of Nastran files...")

    # Load the input deck first, then the binary results file.
    print("Reading BDF file...")
    self.bdf.read_bdf(str(self.bdf_file))

    print("Reading OP2 file...")
    self.op2.read_op2(str(self.op2_file))

    # Fill each section of the record in a fixed order.
    extractors = (
        self.extract_metadata,
        self.extract_mesh,
        self.extract_materials,
        self.extract_boundary_conditions,
        self.extract_loads,
        self.extract_results,
    )
    for extract in extractors:
        extract()

    # Persist JSON + HDF5 into the case directory.
    self.save_data()

    print("Parse complete!")
    return self.neural_field_data

def extract_metadata(self):
    """
    Extract metadata and analysis info
    """
    self.neural_field_data["metadata"] = {
        "version": "1.0.0",
        "created_at": datetime.now().isoformat(),
        "source": "NX_Nastran",
        "case_directory": str(self.case_dir),
        "analysis_type": self.op2.sol,  # SOL 101, 103, etc.
        "title": self.bdf.case_control_deck.title.title if hasattr(self.bdf.case_control_deck, 'title') else "",
        "units": {
            "length": "mm",  # You may need to specify this
            "force": "N",
            "stress": "Pa",
            "temperature": "K"
        }
    }

def extract_mesh(self):
    """
    Extract mesh data from BDF
    """
    print("Extracting mesh...")
    
    # Nodes
    nodes = []
    node_ids = []
    for nid, node in sorted(self.bdf.nodes.items()):
        node_ids.append(nid)
        nodes.append(node.get_position())
    
    nodes_array = np.array(nodes)
    
    # Elements
    element_data = {
        "solid": [],
        "shell": [],
        "beam": [],
        "rigid": []
    }
    
    # Solid elements (TETRA, HEXA, PENTA)
    for eid, elem in self.bdf.elements.items():
        elem_type = elem.type
        
        if elem_type in ['CTETRA', 'CHEXA', 'CPENTA', 'CTETRA10', 'CHEXA20']:
            element_data["solid"].append({
                "id": eid,
                "type": elem_type,
                "nodes": elem.node_ids,
                "material_id": elem.mid,
                "property_id": elem.pid if hasattr(elem, 'pid') else None
            })
            
        elif elem_type in ['CQUAD4', 'CTRIA3', 'CQUAD8', 'CTRIA6']:
            element_data["shell"].append({
                "id": eid,
                "type": elem_type,
                "nodes": elem.node_ids,
                "material_id": elem.mid,
                "property_id": elem.pid,
                "thickness": elem.T() if hasattr(elem, 'T') else None
            })
            
        elif elem_type in ['CBAR', 'CBEAM', 'CROD']:
            element_data["beam"].append({
                "id": eid,
                "type": elem_type,
                "nodes": elem.node_ids,
                "material_id": elem.mid,
                "property_id": elem.pid
            })
            
        elif elem_type in ['RBE2', 'RBE3', 'RBAR']:
            element_data["rigid"].append({
                "id": eid,
                "type": elem_type,
                "nodes": elem.node_ids
            })
    
    # Store mesh data
    self.neural_field_data["mesh"] = {
        "statistics": {
            "n_nodes": len(nodes),
            "n_elements": len(self.bdf.elements),
            "element_types": {
                "solid": len(element_data["solid"]),
                "shell": len(element_data["shell"]),
                "beam": len(element_data["beam"]),
                "rigid": len(element_data["rigid"])
            }
        },
        "nodes": {
            "ids": node_ids,
            "coordinates": nodes_array.tolist(),
            "shape": list(nodes_array.shape)
        },
        "elements": element_data
    }

def extract_materials(self):
    """
    Extract material properties
    """
    print("Extracting materials...")
    
    materials = []
    for mid, mat in self.bdf.materials.items():
        mat_data = {
            "id": mid,
            "type": mat.type
        }
        
        if mat.type == 'MAT1':  # Isotropic material
            mat_data.update({
                "E": mat.e,      # Young's modulus
                "nu": mat.nu,    # Poisson's ratio
                "rho": mat.rho,  # Density
                "G": mat.g,      # Shear modulus
                "alpha": mat.a if hasattr(mat, 'a') else None,  # Thermal expansion
                "tref": mat.tref if hasattr(mat, 'tref') else None,
                "ST": mat.St() if hasattr(mat, 'St') else None,  # Tensile stress limit
                "SC": mat.Sc() if hasattr(mat, 'Sc') else None,  # Compressive stress limit
                "SS": mat.Ss() if hasattr(mat, 'Ss') else None   # Shear stress limit
            })
            
        materials.append(mat_data)
    
    self.neural_field_data["materials"] = materials

def extract_boundary_conditions(self):
    """
    Extract boundary conditions from BDF
    """
    print("Extracting boundary conditions...")
    
    bcs = {
        "spc": [],      # Single point constraints
        "mpc": [],      # Multi-point constraints
        "suport": []    # Free body supports
    }
    
    # SPC (fixed DOFs)
    for spc_id, spc_list in self.bdf.spcs.items():
        for spc in spc_list:
            bcs["spc"].append({
                "id": spc_id,
                "node": spc.node_ids[0] if hasattr(spc, 'node_ids') else spc.node,
                "dofs": spc.components,  # Which DOFs are constrained (123456)
                "enforced_motion": spc.enforced
            })
    
    # MPC equations
    for mpc_id, mpc_list in self.bdf.mpcs.items():
        for mpc in mpc_list:
            bcs["mpc"].append({
                "id": mpc_id,
                "nodes": mpc.node_ids,
                "coefficients": mpc.coefficients,
                "components": mpc.components
            })
    
    self.neural_field_data["boundary_conditions"] = bcs

def extract_loads(self):
    """
    Extract loads from BDF
    """
    print("Extracting loads...")
    
    loads = {
        "point_forces": [],
        "pressure": [],
        "gravity": [],
        "thermal": []
    }
    
    # Point forces (FORCE, MOMENT)
    for load_id, load_list in self.bdf.loads.items():
        for load in load_list:
            if load.type == 'FORCE':
                loads["point_forces"].append({
                    "id": load_id,
                    "node": load.node,
                    "magnitude": load.mag,
                    "direction": [load.xyz[0], load.xyz[1], load.xyz[2]],
                    "coord_system": load.cid
                })
                
            elif load.type == 'MOMENT':
                loads["point_forces"].append({
                    "id": load_id,
                    "node": load.node,
                    "moment": load.mag,
                    "direction": [load.xyz[0], load.xyz[1], load.xyz[2]],
                    "coord_system": load.cid
                })
                
            elif load.type in ['PLOAD', 'PLOAD2', 'PLOAD4']:
                loads["pressure"].append({
                    "id": load_id,
                    "elements": load.element_ids,
                    "pressure": load.pressure,
                    "type": load.type
                })
                
            elif load.type == 'GRAV':
                loads["gravity"].append({
                    "id": load_id,
                    "acceleration": load.scale,
                    "direction": [load.N[0], load.N[1], load.N[2]],
                    "coord_system": load.cid
                })
    
    # Temperature loads
    for temp_id, temp_list in self.bdf.temps.items():
        for temp in temp_list:
            loads["thermal"].append({
                "id": temp_id,
                "node": temp.node,
                "temperature": temp.temperature
            })
    
    self.neural_field_data["loads"] = loads

def extract_results(self):
    """
    Extract results from OP2
    """
    print("Extracting results...")
    
    results = {}
    
    # Get subcase ID (usually 1 for linear static)
    subcase_id = 1
    
    # Displacement
    if hasattr(self.op2, 'displacements'):
        disp = self.op2.displacements[subcase_id]
        disp_data = disp.data[0, :, :]  # [itime=0, all_nodes, 6_dofs]
        
        results["displacement"] = {
            "node_ids": disp.node_gridtype[:, 0].tolist(),
            "data": disp_data.tolist(),
            "shape": list(disp_data.shape),
            "max_magnitude": float(np.max(np.linalg.norm(disp_data[:, :3], axis=1)))
        }
    
    # Stress - handle different element types
    stress_results = {}
    
    # Solid stress
    if hasattr(self.op2, 'ctetra_stress'):
        stress = self.op2.ctetra_stress[subcase_id]
        stress_data = stress.data[0, :, :]
        stress_results["solid_stress"] = {
            "element_ids": stress.element_node[:, 0].tolist(),
            "data": stress_data.tolist(),
            "von_mises": stress_data[:, -1].tolist() if stress_data.shape[1] > 6 else None
        }
    
    # Shell stress  
    if hasattr(self.op2, 'cquad4_stress'):
        stress = self.op2.cquad4_stress[subcase_id]
        stress_data = stress.data[0, :, :]
        stress_results["shell_stress"] = {
            "element_ids": stress.element_node[:, 0].tolist(),
            "data": stress_data.tolist()
        }
    
    results["stress"] = stress_results
    
    # Strain
    strain_results = {}
    if hasattr(self.op2, 'ctetra_strain'):
        strain = self.op2.ctetra_strain[subcase_id]
        strain_data = strain.data[0, :, :]
        strain_results["solid_strain"] = {
            "element_ids": strain.element_node[:, 0].tolist(),
            "data": strain_data.tolist()
        }
    
    results["strain"] = strain_results
    
    # SPC Forces (reactions)
    if hasattr(self.op2, 'spc_forces'):
        spc = self.op2.spc_forces[subcase_id]
        spc_data = spc.data[0, :, :]
        results["reactions"] = {
            "node_ids": spc.node_gridtype[:, 0].tolist(),
            "forces": spc_data.tolist()
        }
    
    self.neural_field_data["results"] = results

def save_data(self):
    """
    Persist the parsed record as JSON (metadata/structure) plus HDF5
    (large numeric arrays) inside the case directory.
    """
    print("Saving data...")

    record = self.neural_field_data

    # JSON copy of the whole record. default=str stringifies anything
    # json can't serialize natively (e.g. stray numpy scalars).
    json_file = self.case_dir / "neural_field_data.json"
    with open(json_file, 'w') as f:
        json.dump(record, f, indent=2, default=str)

    # HDF5 copy of the big arrays only.
    h5_file = self.case_dir / "neural_field_data.h5"
    with h5py.File(h5_file, 'w') as f:
        coords = np.array(record["mesh"]["nodes"]["coordinates"])
        f.create_group('mesh').create_dataset('node_coordinates', data=coords)

        if "results" in record:
            results_grp = f.create_group('results')
            if "displacement" in record["results"]:
                disp = np.array(record["results"]["displacement"]["data"])
                results_grp.create_dataset('displacement', data=disp)

    print(f"Data saved to {json_file} and {h5_file}")

# ============================================================================
# USAGE SCRIPT
# ============================================================================

def main():
    """
    Command-line entry point: parse a single case directory.

    Usage: python neural_field_parser.py <case_directory>
    """
    import sys

    if len(sys.argv) < 2:
        print("Usage: python neural_field_parser.py <case_directory>")
        sys.exit(1)

    case_dir = sys.argv[1]

    # Create parser
    parser = NastranToNeuralFieldParser(case_dir)

    # Parse all data, printing a full traceback on failure so the
    # offending file/card can be identified.
    try:
        data = parser.parse_all()
        print("\nParsing successful!")
        print(f"Nodes: {data['mesh']['statistics']['n_nodes']}")
        print(f"Elements: {data['mesh']['statistics']['n_elements']}")
        print(f"Materials: {len(data['materials'])}")

    except Exception as e:
        print(f"\nError during parsing: {e}")
        import traceback
        traceback.print_exc()


# BUG FIX: the original guard read `if name == "main"`, which raises
# NameError at import time; the correct idiom uses the __name__ dunder.
if __name__ == "__main__":
    main()

Validation Script

"""
validate_parsed_data.py

Validates the parsed neural field data
"""

import json
import h5py
import numpy as np
from pathlib import Path

class NeuralFieldDataValidator:
    """Validates parsed data for completeness and consistency."""

def __init__(self, case_directory):
    self.case_dir = Path(case_directory)
    self.json_file = self.case_dir / "neural_field_data.json"
    self.h5_file = self.case_dir / "neural_field_data.h5"
    
def validate(self):
    """
    Run all validation checks
    """
    print("Starting validation...")
    
    # Load data
    with open(self.json_file, 'r') as f:
        data = json.load(f)
    
    # Check required fields
    required_fields = [
        "metadata", "mesh", "materials", 
        "boundary_conditions", "loads", "results"
    ]
    
    for field in required_fields:
        if field not in data:
            print(f"❌ Missing required field: {field}")
            return False
        else:
            print(f"✅ Found {field}")
    
    # Validate mesh
    n_nodes = data["mesh"]["statistics"]["n_nodes"]
    n_elements = data["mesh"]["statistics"]["n_elements"]
    
    print(f"\nMesh Statistics:")
    print(f"  Nodes: {n_nodes}")
    print(f"  Elements: {n_elements}")
    
    # Check results consistency
    if "displacement" in data["results"]:
        disp_nodes = len(data["results"]["displacement"]["node_ids"])
        if disp_nodes != n_nodes:
            print(f"⚠️  Displacement nodes ({disp_nodes}) != mesh nodes ({n_nodes})")
    
    # Check HDF5 file
    with h5py.File(self.h5_file, 'r') as f:
        print(f"\nHDF5 Contents:")
        for key in f.keys():
            print(f"  {key}: {list(f[key].keys())}")
    
    print("\n✅ Validation complete!")
    return True

# BUG FIX: the original one-liner compared the bare names `name`/"main"
# (NameError) and collapsed three statements onto one line.
if __name__ == "__main__":
    import sys

    validator = NeuralFieldDataValidator(sys.argv[1])
    validator.validate()

Step-by-Step Usage Instructions

  1. Prepare Your Analysis bash# In NX:
  2. Create geometry
  3. Generate mesh
  4. Apply materials (MAT1 cards)
  5. Apply constraints (SPC)
  6. Apply loads (FORCE, PLOAD4)
  7. Run SOL 101 (Linear Static)
  8. Request output: DISPLACEMENT=ALL, STRESS=ALL, STRAIN=ALL
  9. Organize Files

mkdir training_case_001
mkdir training_case_001/input
mkdir training_case_001/output

Copy files

cp your_model.bdf training_case_001/input/model.bdf
cp your_model.op2 training_case_001/output/model.op2
cp your_model.f06 training_case_001/output/model.f06

3. Run Parser

# Install requirements
pip install pyNastran numpy h5py

Run parser

python neural_field_parser.py training_case_001

Validate

python validate_parsed_data.py training_case_001

4. Check Output

You'll get:

neural_field_data.json - Complete metadata and structure neural_field_data.h5 - Large arrays (mesh, results)

Automation Script for Multiple Cases

"""
batch_parser.py

Parse multiple cases automatically
"""

import os
from pathlib import Path

from neural_field_parser import NastranToNeuralFieldParser

def batch_parse(root_directory):
    """
    Parse every case subdirectory under root_directory.

    Args:
        root_directory: Folder whose immediate subdirectories are cases
            laid out as expected by NastranToNeuralFieldParser.

    Returns:
        list[dict]: One status record per case -- success records carry
        node/element counts, failure records carry the error message.
    """
    root = Path(root_directory)
    # Sort for a deterministic processing order across filesystems
    # (iterdir() order is filesystem-dependent).
    cases = sorted(d for d in root.iterdir() if d.is_dir())

    results = []
    for case in cases:
        print(f"\nProcessing {case.name}...")
        try:
            parser = NastranToNeuralFieldParser(case)
            data = parser.parse_all()
            results.append({
                "case": case.name,
                "status": "success",
                "nodes": data["mesh"]["statistics"]["n_nodes"],
                "elements": data["mesh"]["statistics"]["n_elements"]
            })
        except Exception as e:
            # A failed case must not abort the batch; record and continue.
            results.append({
                "case": case.name,
                "status": "failed",
                "error": str(e)
            })

    # Summary
    print("\n" + "=" * 50)
    print("BATCH PROCESSING COMPLETE")
    print("=" * 50)
    for r in results:
        status = "✅" if r["status"] == "success" else "❌"
        print(f"{status} {r['case']}: {r['status']}")

    return results

# BUG FIX: the original guard compared the bare names `name`/"main"
# (NameError); use the standard __name__ dunder guard.
if __name__ == "__main__":
    batch_parse("./training_data")

What to Add Manually

Create a metadata.json in each case directory with design intent:

{
  "design_parameters": {
    "thickness": 2.5,
    "fillet_radius": 5.0,
    "rib_height": 15.0
  },
  "optimization_context": {
    "objectives": ["minimize_weight", "minimize_stress"],
    "constraints": ["max_displacement < 2mm"],
    "iteration": 42
  },
  "notes": "Baseline design with standard loading"
}

Troubleshooting Common Issues:

"Can't find BDF nodes"

Make sure you're using .bdf or .dat, not .sim Check that mesh was exported to solver deck

"OP2 has no results"

Ensure analysis completed successfully Check that you requested output (DISP=ALL, STRESS=ALL)

"Memory error with large models"

Use HDF5 chunking for very large models Process in batches

This parser gives you everything you need to start training neural networks on your FEA data. The format is future-proof and will work with your automated generation pipeline!