Tier 2 dev workflow: Windows test runner + result sync
- run_tests.bat: double-click test runner with JSON result capture
- run_script.bat: run any script with output capture
- test_results/ folder for Syncthing-based result sharing
- Auto-mark NX-dependent tests for --quick mode
- pytest-json-report for structured results
This commit is contained in:
4
.gitignore
vendored
4
.gitignore
vendored
@@ -142,3 +142,7 @@ C:*
|
|||||||
|
|
||||||
# project-context-sync (auto-generated, local only)
|
# project-context-sync (auto-generated, local only)
|
||||||
PROJECT_STATE.md
|
PROJECT_STATE.md
|
||||||
|
|
||||||
|
# Test results (synced via Syncthing, not git)
|
||||||
|
test_results/*.json
|
||||||
|
test_results/*.log
|
||||||
|
|||||||
@@ -68,3 +68,4 @@ pytest-cov>=4.1.0
|
|||||||
black>=23.12.0
|
black>=23.12.0
|
||||||
ruff>=0.1.0
|
ruff>=0.1.0
|
||||||
mypy>=1.8.0
|
mypy>=1.8.0
|
||||||
|
pytest-json-report>=1.5.0
|
||||||
|
|||||||
91
run_script.bat
Normal file
91
run_script.bat
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
@echo off
REM ============================================================================
REM Atomizer Script Runner - Run any Python script with result capture
REM ============================================================================
REM Usage:
REM     run_script.bat path\to\script.py [args...]
REM
REM Results sync back to Mario via Syncthing in test_results/
REM ============================================================================

setlocal enabledelayedexpansion

set "ATOMIZER_ROOT=%~dp0"
set "RESULTS_DIR=%ATOMIZER_ROOT%test_results"
set "SCRIPT=%~1"

if "%SCRIPT%"=="" (
    echo Usage: run_script.bat path\to\script.py [args...]
    pause
    exit /b 1
)

REM Timestamp - generated by Python because parsing %date%/%time% in batch
REM is locale-dependent.
for /f %%i in ('python -c "from datetime import datetime; print(datetime.now().strftime('%%Y-%%m-%%d_%%H-%%M-%%S'))"') do set "TIMESTAMP=%%i"

set "LOG_FILE=%RESULTS_DIR%\script_%TIMESTAMP%.log"
set "RUN_FILE=%RESULTS_DIR%\script_%TIMESTAMP%.json"

if not exist "%RESULTS_DIR%" mkdir "%RESULTS_DIR%"

echo.
echo ============================================================================
echo Running: %SCRIPT%
echo %date% %time%
echo ============================================================================
echo.

cd /d "%ATOMIZER_ROOT%"

REM Collect every argument after the script path into EXTRA_ARGS.
set "EXTRA_ARGS="
shift
:argloop
if not "%~1"=="" (
    set "EXTRA_ARGS=!EXTRA_ARGS! %~1"
    shift
    goto argloop
)

REM Run the script, capturing stdout+stderr to the log.
python "%SCRIPT%" %EXTRA_ARGS% > "%LOG_FILE%" 2>&1
set "EXIT_CODE=!errorlevel!"

REM Also echo to console
type "%LOG_FILE%"

echo.
echo ============================================================================
echo Exit code: %EXIT_CODE%

REM Generate result JSON.
REM FIX: cmd.exe cannot span a quoted python -c string across multiple lines,
REM and splicing %LOG_FILE%/%SCRIPT% into Python source breaks on quotes or
REM apostrophes in paths. All data is therefore passed through the environment
REM and the report is built by one self-contained one-liner. The JSON holds
REM the last 50 output lines; the full output stays in the .log file.
python -c "import json,os; from datetime import datetime; log=open(os.environ['LOG_FILE'],encoding='utf-8',errors='replace').read(); lines=log.strip().split('\n'); ec=int(os.environ['EXIT_CODE']); json.dump({'timestamp':datetime.now().isoformat(),'type':'script','script':os.environ['SCRIPT'],'args':os.environ.get('EXTRA_ARGS','').strip(),'exit_code':ec,'status':'OK' if ec==0 else 'ERROR','output_tail':lines[-50:],'log_file':os.path.basename(os.environ['LOG_FILE']),'total_lines':len(lines)},open(os.environ['RUN_FILE'],'w'),indent=2)"

echo Results saved. Will sync to Mario via Syncthing.
echo ============================================================================
pause
|
||||||
188
run_tests.bat
Normal file
188
run_tests.bat
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
@echo off
REM ============================================================================
REM Atomizer Test Runner - Tier 2 Dev Workflow
REM ============================================================================
REM Double-click this to run tests. Results sync back to Mario via Syncthing.
REM
REM Usage:
REM     run_tests.bat                 - run all tests
REM     run_tests.bat test_spec_api   - run specific test file
REM     run_tests.bat unit            - run unit tests folder
REM     run_tests.bat --quick         - fast smoke test (no slow/NX tests)
REM     run_tests.bat --nx            - NX-dependent tests only
REM ============================================================================

setlocal enabledelayedexpansion

REM === CONFIG ===
set "ATOMIZER_ROOT=%~dp0"
set "RESULTS_DIR=%ATOMIZER_ROOT%test_results"
set "PYTHON=python"

REM Timestamp for this run - generated by Python because the old
REM 'for /f ... in ("%date% %time%")' parse was locale-dependent (and its
REM result was immediately overwritten anyway).
for /f %%i in ('python -c "from datetime import datetime; print(datetime.now().strftime('%%Y-%%m-%%d_%%H-%%M-%%S'))"') do set "TIMESTAMP=%%i"

set "RUN_FILE=%RESULTS_DIR%\run_%TIMESTAMP%.json"
set "LOG_FILE=%RESULTS_DIR%\run_%TIMESTAMP%.log"
set "REPORT_FILE=%RESULTS_DIR%\_pytest_report.json"

REM Create results dir if needed
if not exist "%RESULTS_DIR%" mkdir "%RESULTS_DIR%"

echo.
echo ============================================================================
echo ATOMIZER TEST RUNNER
echo %date% %time%
echo ============================================================================
echo.

REM === Gather system info ===
echo Gathering environment info...
for /f "delims=" %%v in ('python --version 2^>^&1') do set "PYTHON_VER=%%v"
for /f "delims=" %%v in ('python -c "import sys; print(sys.executable)"') do set "PYTHON_EXE=%%v"

REM Check if NX is available
set "NX_AVAILABLE=false"
python -c "import NXOpen" 2>nul && set "NX_AVAILABLE=true"

REM === Determine what to run ===
REM FIX: PYTEST_ARGS is set WITHOUT surrounding quotes so the inner quotes
REM around the -m expression survive expansion. The old \"...\" escaping put
REM literal quote characters into argv and split the marker expression into
REM separate arguments.
set "TEST_TARGET=tests/"
set PYTEST_ARGS=-v --tb=short
set "TEST_MODE=all"

if "%~1"=="--quick" (
    set PYTEST_ARGS=-v --tb=short -m "not slow and not nx"
    set "TEST_MODE=quick"
) else if "%~1"=="--nx" (
    set PYTEST_ARGS=-v --tb=short -m nx
    set "TEST_MODE=nx-only"
) else if not "%~1"=="" (
    set "TEST_TARGET=tests/%~1"
    if not exist "%ATOMIZER_ROOT%tests\%~1" (
        set "TEST_TARGET=tests/%~1.py"
    )
    set "TEST_MODE=targeted"
)

echo Mode:    %TEST_MODE%
echo Target:  %TEST_TARGET%
echo Python:  %PYTHON_VER%
echo NX:      %NX_AVAILABLE%
echo Results: %RUN_FILE%
echo.

REM === Run tests ===
echo Running tests...
echo ============================================================================

cd /d "%ATOMIZER_ROOT%"

REM FIX: probe for the json-report plugin first, then run pytest exactly ONCE.
REM The old flow ran the entire suite twice (once for the JSON report, then
REM again to capture the log).
set "JSON_ARGS="
python -c "import pytest_jsonreport" >nul 2>&1
if not errorlevel 1 set "JSON_ARGS=--json-report --json-report-file=%REPORT_FILE%"

python -m pytest %TEST_TARGET% %PYTEST_ARGS% %JSON_ARGS% > "%LOG_FILE%" 2>&1
set "EXIT_CODE=!errorlevel!"
type "%LOG_FILE%"

echo.
echo ============================================================================

REM === Generate results JSON ===
REM FIX: cmd.exe cannot span a quoted python -c string across lines, and
REM splicing batch variables into Python source breaks on quotes (and made
REM 'nx_available': true a NameError). All data is passed through the
REM environment and the report is built by one self-contained one-liner.
REM The JSON carries the FAILED/ERROR summary lines; full failure detail
REM stays in the .log file.
python -c "import json,os,platform; from datetime import datetime; log=open(os.environ['LOG_FILE'],encoding='utf-8',errors='replace').read(); lines=log.split('\n'); summary=next((l.strip() for l in reversed(lines) if 'passed' in l or 'failed' in l or 'error' in l),''); failures=[l.strip() for l in lines if l.startswith('FAILED ') or l.startswith('ERROR ')]; rp=os.environ['REPORT_FILE']; jr=json.load(open(rp)) if os.path.exists(rp) else None; ec=int(os.environ['EXIT_CODE']); result={'timestamp':datetime.now().isoformat(),'exit_code':ec,'mode':os.environ['TEST_MODE'],'target':os.environ['TEST_TARGET'],'python':os.environ.get('PYTHON_VER',''),'python_exe':os.environ.get('PYTHON_EXE',''),'nx_available':os.environ['NX_AVAILABLE']=='true','platform':platform.platform(),'summary':summary,'failures':failures[:20],'log_file':os.path.basename(os.environ['LOG_FILE']),'status':'PASS' if ec==0 else 'FAIL'}; result.update({'pytest_summary':jr['summary']} if jr and 'summary' in jr else {}); json.dump(result,open(os.environ['RUN_FILE'],'w'),indent=2); print(); print('Status: '+result['status']); print('Summary: '+summary); print('Results saved to: '+os.path.basename(os.environ['RUN_FILE']))"

echo.

REM === Also write a latest.json pointer ===
REM Redirection placed first so no trailing space leaks into the JSON value.
>"%RESULTS_DIR%\latest.json" echo {"latest": "run_%TIMESTAMP%.json", "timestamp": "%TIMESTAMP%"}

REM === Cleanup old pytest report ===
if exist "%REPORT_FILE%" del "%REPORT_FILE%"

echo.
if %EXIT_CODE% EQU 0 (
    echo ALL TESTS PASSED
) else (
    echo SOME TESTS FAILED - check results
)
echo.
echo Results will sync to Mario via Syncthing.
echo ============================================================================
pause
|
||||||
@@ -9,6 +9,20 @@ from pathlib import Path
|
|||||||
# Add project root to path
|
# Add project root to path
|
||||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_configure(config):
    """Register the custom markers this suite relies on."""
    markers = (
        "nx: requires NX Open (skip with -m 'not nx')",
        "slow: slow tests (skip with -m 'not slow')",
    )
    for spec in markers:
        config.addinivalue_line("markers", spec)
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_collection_modifyitems(config, items):
    """Auto-mark NX-dependent tests.

    Any collected test whose node id contains 'nx' or 'journal'
    (case-insensitive) receives the ``nx`` marker, so ``-m "not nx"``
    (e.g. --quick mode) skips it automatically.
    """
    nx_marker = pytest.mark.nx
    for item in items:
        node = item.nodeid.lower()
        # NOTE(review): plain substring match - also catches names that merely
        # contain 'nx' (e.g. 'unix'); confirm that is acceptable.
        if "nx" in node or "journal" in node:
            item.add_marker(nx_marker)
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def sample_study_dir(tmp_path):
|
def sample_study_dir(tmp_path):
|
||||||
"""Create a temporary study directory structure."""
|
"""Create a temporary study directory structure."""
|
||||||
|
|||||||
Reference in New Issue
Block a user