# Source file: Atomizer/atomizer-field/atomizer_field_config.yaml
# (Recovered from a web file-viewer paste; viewer chrome removed, indentation restored.)
# AtomizerField Configuration
# Long-term vision configuration for neural field learning
---
# ============================================================================
# Model Architecture
# ============================================================================
model:
  type: "graph_neural_network"
  architecture: "message_passing"

  # Foundation model settings (for transfer learning)
  foundation:
    enabled: false  # Set to true when foundation model available
    path: "models/physics_foundation_v1.pt"
    freeze: true  # Freeze foundation layers during fine-tuning

    # Adaptation layers (for fine-tuning on new component types)
    # NOTE(review): nested under `foundation` because the comment ties it to
    # fine-tuning; confirm against the config loader's expected schema.
    adaptation:
      layers: 2
      neurons: 128
      dropout: 0.1

  # Core GNN parameters
  gnn:
    node_feature_dim: 12  # [x,y,z, BC(6), loads(3)]
    edge_feature_dim: 5  # [E, nu, rho, G, alpha]
    hidden_dim: 128
    num_layers: 6
    dropout: 0.1

  # Output decoders
  decoders:
    displacement:
      enabled: true
      output_dim: 6  # [ux, uy, uz, rx, ry, rz]
    stress:
      enabled: true
      output_dim: 6  # [sxx, syy, szz, txy, tyz, txz]
# ============================================================================
# Training Configuration
# ============================================================================
training:
  # Progressive training (coarse to fine meshes)
  progressive:
    enabled: false  # Enable for multi-resolution training
    stages:
      - resolution: "coarse"
        max_nodes: 5000
        epochs: 20
        lr: 0.001
      - resolution: "medium"
        max_nodes: 20000
        epochs: 10
        lr: 0.0005
      - resolution: "fine"
        max_nodes: 100000
        epochs: 5
        lr: 0.0001

  # Online learning (during optimization)
  online:
    enabled: false  # Enable to learn from FEA during optimization
    update_frequency: 10  # Update model every N FEA runs
    quick_update_steps: 10
    learning_rate: 0.0001

  # Physics-informed loss weights
  loss:
    type: "physics"  # Options: mse, relative, physics, max
    weights:
      data: 1.0  # Match FEA results
      equilibrium: 0.1  # ∇·σ + f = 0
      constitutive: 0.1  # σ = C:ε
      boundary: 1.0  # u = 0 at fixed nodes

  # Standard training parameters
  hyperparameters:
    epochs: 100
    batch_size: 4
    learning_rate: 0.001
    weight_decay: 0.00001

  # Optimization
  optimizer:
    type: "AdamW"
    betas: [0.9, 0.999]

  # LR scheduler
  # NOTE(review): placed as a sibling of `optimizer`; confirm whether the
  # loader expects it nested under `optimizer` instead.
  scheduler:
    type: "ReduceLROnPlateau"
    factor: 0.5
    patience: 10

  # Early stopping
  early_stopping:
    enabled: true
    patience: 50
    min_delta: 0.0001
# ============================================================================
# Data Pipeline
# ============================================================================
data:
  # Data normalization
  normalization:
    enabled: true
    method: "standard"  # Options: standard, minmax

  # Data augmentation
  augmentation:
    enabled: false  # Enable for data augmentation
    techniques:
      - rotation  # Rotate mesh randomly
      - scaling  # Scale loads
      - noise  # Add small noise to inputs

  # Multi-resolution support
  multi_resolution:
    enabled: false
    resolutions: ["coarse", "medium", "fine"]

  # Caching
  cache:
    in_memory: false  # Cache dataset in RAM (faster but memory-intensive)
    disk_cache: true  # Cache preprocessed graphs to disk
# ============================================================================
# Optimization Interface
# ============================================================================
optimization:
  # Gradient-based optimization
  use_gradients: true

  # Uncertainty quantification
  uncertainty:
    enabled: false  # Enable ensemble for uncertainty
    ensemble_size: 5
    threshold: 0.1  # Recommend FEA if uncertainty > threshold

  # FEA fallback
  fallback_to_fea:
    enabled: true
    conditions:
      - high_uncertainty  # Uncertainty > threshold
      - extrapolation  # Outside training distribution
      - critical_design  # Final validation

  # Batch evaluation
  batch_size: 100  # Evaluate designs in batches for speed
# ============================================================================
# Model Versioning & Deployment
# ============================================================================
deployment:
  # Model versioning
  versioning:
    enabled: true
    format: "semantic"  # v1.0.0, v1.1.0, etc.

  # Model registry
  registry:
    path: "models/"
    naming: "{component_type}_v{version}.pt"

  # Metadata tracking
  # NOTE(review): placed as a sibling of `registry`; confirm it is not meant
  # to be nested under `registry` in the loader's schema.
  metadata:
    track_training_data: true
    track_performance: true
    track_hyperparameters: true

  # Production settings
  production:
    device: "cuda"  # cuda or cpu
    batch_inference: true
    max_batch_size: 100
# ============================================================================
# Integration with Atomizer
# ============================================================================
atomizer_integration:
  # Dashboard integration
  dashboard:
    enabled: false  # Future: Show field visualizations in dashboard

  # Database integration
  database:
    enabled: false  # Future: Store predictions in Atomizer DB

  # API endpoints
  api:
    enabled: false  # Future: REST API for predictions
    port: 8000
# ============================================================================
# Monitoring & Logging
# ============================================================================
monitoring:
  # TensorBoard
  tensorboard:
    enabled: true
    log_dir: "runs/tensorboard"

  # Weights & Biases (optional)
  wandb:
    enabled: false
    project: "atomizerfield"
    entity: "your_team"

  # Logging level
  logging:
    level: "INFO"  # DEBUG, INFO, WARNING, ERROR
    file: "logs/atomizerfield.log"
# ============================================================================
# Experimental Features
# ============================================================================
experimental:
  # Nonlinear analysis
  nonlinear:
    enabled: false

  # Contact analysis
  contact:
    enabled: false

  # Composite materials
  composites:
    enabled: false

  # Modal analysis
  modal:
    enabled: false

  # Topology optimization
  topology:
    enabled: false