feat: Merge Atomizer-Field neural network module into main repository
Permanently integrates the Atomizer-Field GNN surrogate system: - neural_models/: Graph Neural Network for FEA field prediction - batch_parser.py: Parse training data from FEA exports - train.py: Neural network training pipeline - predict.py: Inference engine for fast predictions This enables 600x-2200x speedup over traditional FEA by replacing expensive simulations with millisecond neural network predictions. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
402
atomizer-field/test_suite.py
Normal file
402
atomizer-field/test_suite.py
Normal file
@@ -0,0 +1,402 @@
|
||||
"""
|
||||
test_suite.py
|
||||
Master test orchestrator for AtomizerField
|
||||
|
||||
AtomizerField Testing Framework v1.0
|
||||
Comprehensive validation from basic functionality to full neural FEA predictions.
|
||||
|
||||
Usage:
|
||||
python test_suite.py --quick # 5-minute smoke tests
|
||||
python test_suite.py --physics # Physics validation tests
|
||||
python test_suite.py --learning # Learning capability tests
|
||||
python test_suite.py --full # Complete test suite (1 hour)
|
||||
|
||||
Testing Strategy:
|
||||
1. Smoke Tests (5 min) → Verify basic functionality
|
||||
2. Physics Tests (15 min) → Validate physics constraints
|
||||
3. Learning Tests (30 min) → Confirm learning capability
|
||||
4. Integration Tests (1 hour) → Full system validation
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
import time
|
||||
from pathlib import Path
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
# Add project root to path so the sibling `tests` package (imported lazily
# inside TestRunner's phase methods) resolves even when the suite is launched
# from a different working directory.
sys.path.insert(0, str(Path(__file__).parent))
|
||||
|
||||
# Shared accumulator for the whole run: mutated in place by TestRunner
# and serialized to JSON by TestRunner.save_results().
TEST_RESULTS = dict(
    timestamp=datetime.now().isoformat(),   # when this run started
    tests=[],                               # one record per executed test
    summary=dict(total=0, passed=0, failed=0, skipped=0),
)
|
||||
|
||||
|
||||
class TestRunner:
    """
    Test orchestrator that runs all tests in sequence.

    Phases are cumulative: each mode runs every earlier phase first
    ('quick' -> 'physics' -> 'learning' -> 'full').  Outcomes accumulate
    in the module-level TEST_RESULTS dict and are written to the
    test_results/ directory as JSON.
    """

    def __init__(self, mode='quick'):
        """
        Initialize test runner.

        Args:
            mode (str): Testing mode ('quick', 'physics', 'learning', 'full')
        """
        self.mode = mode
        self.results_dir = Path('test_results')
        # exist_ok so repeated runs reuse the same directory.
        self.results_dir.mkdir(exist_ok=True)

        print(f"\n{'='*60}")
        print("AtomizerField Test Suite v1.0")
        print(f"Mode: {mode.upper()}")
        print(f"{'='*60}\n")

    def run_test(self, test_name, test_func, description):
        """
        Run a single test and record results.

        A test function may:
          * return None or True                      -> PASS
          * return a dict with optional 'status'
            ('PASS'/'FAIL'/'SKIP'), 'message' and
            'metrics' keys                           -> recorded as given
          * return anything else                     -> FAIL (stringified)
          * raise                                    -> FAIL with exception text

        Args:
            test_name (str): Name of test
            test_func (callable): Zero-argument test function to run
            description (str): Test description

        Returns:
            bool: True if passed
        """
        print(f"[TEST] {test_name}")
        print(f"  Description: {description}")

        start_time = time.time()
        result = {
            'name': test_name,
            'description': description,
            'status': 'unknown',
            'duration': 0,
            'message': '',
            'metrics': {}
        }

        try:
            test_result = test_func()

            if test_result is None or test_result is True:
                result['status'] = 'PASS'
                result['message'] = 'Test passed successfully'
            elif isinstance(test_result, dict):
                result['status'] = test_result.get('status', 'PASS')
                result['message'] = test_result.get('message', '')
                result['metrics'] = test_result.get('metrics', {})
            else:
                # Any other return value is treated as a failure report.
                result['status'] = 'FAIL'
                result['message'] = str(test_result)
        except Exception as e:
            result['status'] = 'FAIL'
            result['message'] = f"Exception: {str(e)}"

        # Tally and report the outcome in one place.  BUG FIX: a dict result
        # with status 'SKIP' was previously counted as a failure and the
        # summary's 'skipped' counter was never incremented.
        if result['status'] == 'PASS':
            print("  Status: [PASS]")
            TEST_RESULTS['summary']['passed'] += 1
        elif result['status'] == 'SKIP':
            print("  Status: [SKIP]")
            if result['message']:
                print(f"  Reason: {result['message']}")
            TEST_RESULTS['summary']['skipped'] += 1
        else:
            print("  Status: [FAIL]")
            if result['message']:
                print(f"  Reason: {result['message']}")
            TEST_RESULTS['summary']['failed'] += 1

        result['duration'] = time.time() - start_time
        print(f"  Duration: {result['duration']:.2f}s\n")

        TEST_RESULTS['tests'].append(result)
        TEST_RESULTS['summary']['total'] += 1

        return result['status'] == 'PASS'

    def run_smoke_tests(self):
        """
        Quick smoke tests (5 minutes).
        Verify basic functionality.
        """
        print(f"\n{'='*60}")
        print("PHASE 1: SMOKE TESTS (5 minutes)")
        print(f"{'='*60}\n")

        # Imported lazily so lighter modes don't pay for heavy test deps.
        from tests import test_synthetic

        # Test 1: Model creation
        self.run_test(
            "Model Creation",
            test_synthetic.test_model_creation,
            "Verify GNN model can be instantiated"
        )

        # Test 2: Forward pass
        self.run_test(
            "Forward Pass",
            test_synthetic.test_forward_pass,
            "Verify model can process dummy data"
        )

        # Test 3: Loss computation
        self.run_test(
            "Loss Computation",
            test_synthetic.test_loss_computation,
            "Verify loss functions work"
        )

    def run_physics_tests(self):
        """
        Physics validation tests (15 minutes).
        Ensure physics constraints work.
        """
        print(f"\n{'='*60}")
        print("PHASE 2: PHYSICS VALIDATION (15 minutes)")
        print(f"{'='*60}\n")

        from tests import test_physics

        # Test 1: Cantilever beam
        self.run_test(
            "Cantilever Beam (Analytical)",
            test_physics.test_cantilever_analytical,
            "Compare with δ = FL³/3EI solution"
        )

        # Test 2: Equilibrium
        self.run_test(
            "Equilibrium Check",
            test_physics.test_equilibrium,
            "Verify force balance (∇·σ + f = 0)"
        )

        # Test 3: Energy conservation
        self.run_test(
            "Energy Conservation",
            test_physics.test_energy_conservation,
            "Verify strain energy = work done"
        )

    def run_learning_tests(self):
        """
        Learning capability tests (30 minutes).
        Confirm network can learn.
        """
        print(f"\n{'='*60}")
        print("PHASE 3: LEARNING CAPABILITY (30 minutes)")
        print(f"{'='*60}\n")

        from tests import test_learning

        # Test 1: Memorization
        self.run_test(
            "Memorization Test",
            test_learning.test_memorization,
            "Can network memorize small dataset?"
        )

        # Test 2: Interpolation
        self.run_test(
            "Interpolation Test",
            test_learning.test_interpolation,
            "Can network interpolate between training points?"
        )

        # Test 3: Pattern recognition
        self.run_test(
            "Pattern Recognition",
            test_learning.test_pattern_recognition,
            "Does network learn thickness → stress relationship?"
        )

    def run_integration_tests(self):
        """
        Full integration tests (1 hour).
        Complete system validation.
        """
        print(f"\n{'='*60}")
        print("PHASE 4: INTEGRATION TESTS (1 hour)")
        print(f"{'='*60}\n")

        from tests import test_predictions

        # Test 1: Parser validation
        self.run_test(
            "Parser Validation",
            test_predictions.test_parser,
            "Verify data parsing works correctly"
        )

        # Test 2: Training pipeline
        self.run_test(
            "Training Pipeline",
            test_predictions.test_training,
            "Verify complete training workflow"
        )

        # Test 3: Prediction accuracy
        self.run_test(
            "Prediction Accuracy",
            test_predictions.test_prediction_accuracy,
            "Compare neural vs FEA predictions"
        )

    def print_summary(self):
        """Print aggregate pass/fail/skip counts and the pass rate."""
        summary = TEST_RESULTS['summary']

        print(f"\n{'='*60}")
        print("TEST SUMMARY")
        print(f"{'='*60}\n")

        total = summary['total']
        passed = summary['passed']
        failed = summary['failed']
        skipped = summary['skipped']

        # Guard against division by zero when no tests ran.
        pass_rate = (passed / total * 100) if total > 0 else 0

        print(f"Total Tests: {total}")
        print(f"  + Passed: {passed}")
        print(f"  - Failed: {failed}")
        if skipped:
            # Only shown when something was skipped, so default output
            # stays identical to earlier versions.
            print(f"  ~ Skipped: {skipped}")
        print(f"  Pass Rate: {pass_rate:.1f}%\n")

        if failed == 0:
            print("[SUCCESS] ALL TESTS PASSED - SYSTEM READY!")
        else:
            print(f"[ERROR] {failed} TEST(S) FAILED - REVIEW REQUIRED")

        print(f"\n{'='*60}\n")

    def save_results(self):
        """Save TEST_RESULTS to a mode- and time-stamped JSON file."""
        results_file = self.results_dir / f'test_results_{self.mode}_{int(time.time())}.json'

        with open(results_file, 'w') as f:
            json.dump(TEST_RESULTS, f, indent=2)

        print(f"Results saved to: {results_file}")

    def run(self):
        """
        Run the test phases selected by self.mode.

        Returns:
            int: 0 when every test passed, 1 otherwise (process exit code).
        """
        start_time = time.time()

        if self.mode == 'quick':
            self.run_smoke_tests()

        elif self.mode == 'physics':
            self.run_smoke_tests()
            self.run_physics_tests()

        elif self.mode == 'learning':
            self.run_smoke_tests()
            self.run_physics_tests()
            self.run_learning_tests()

        elif self.mode == 'full':
            self.run_smoke_tests()
            self.run_physics_tests()
            self.run_learning_tests()
            self.run_integration_tests()

        total_time = time.time() - start_time

        # Print summary
        self.print_summary()

        print(f"Total testing time: {total_time/60:.1f} minutes\n")

        # Save results
        self.save_results()

        # Non-zero exit code signals failure to CI callers.
        return 0 if TEST_RESULTS['summary']['failed'] == 0 else 1
|
||||
|
||||
|
||||
def main():
    """Command-line entry point: parse flags, pick a mode, run the suite."""
    parser = argparse.ArgumentParser(
        description='AtomizerField Test Suite',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Quick smoke tests (5 min)
  python test_suite.py --quick

  # Physics validation (15 min)
  python test_suite.py --physics

  # Learning tests (30 min)
  python test_suite.py --learning

  # Full test suite (1 hour)
  python test_suite.py --full
"""
    )

    parser.add_argument('--quick', action='store_true',
                        help='Run quick smoke tests (5 minutes)')
    parser.add_argument('--physics', action='store_true',
                        help='Run physics validation tests (15 minutes)')
    parser.add_argument('--learning', action='store_true',
                        help='Run learning capability tests (30 minutes)')
    parser.add_argument('--full', action='store_true',
                        help='Run complete test suite (1 hour)')

    args = parser.parse_args()

    # The most comprehensive requested mode wins when several flags are set.
    for enabled, candidate in (
        (args.full, 'full'),
        (args.learning, 'learning'),
        (args.physics, 'physics'),
        (args.quick, 'quick'),
    ):
        if enabled:
            mode = candidate
            break
    else:
        # Default to quick if no mode specified
        mode = 'quick'
        print("No mode specified, defaulting to --quick")

    # Run tests and propagate the suite's exit code to the shell.
    runner = TestRunner(mode=mode)
    sys.exit(runner.run())
|
||||
|
||||
|
||||
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user