# Atomizer/tests/test_api_verification.py
"""
Quick API Verification Test
Minimal test to verify Anthropic API credentials work.
Uses smallest possible request to minimize credit usage.
Usage:
python tests/test_api_verification.py
Author: Antoine Letarte
Date: 2025-11-17
"""
import sys
import os
from pathlib import Path
# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from optimization_engine.llm_workflow_analyzer import LLMWorkflowAnalyzer
def test_api_connection():
    """
    Verify that the Anthropic API key works with a minimal request.

    Reads the key from the ANTHROPIC_API_KEY environment variable only.
    The prompt is intentionally tiny so the verification consumes as few
    credits as possible (~100-200 tokens).

    Returns:
        bool: True if the API round-trip succeeded, False if the key is
        missing or the request failed.
    """
    print("=" * 80)
    print("Anthropic API Verification Test")
    print("=" * 80)
    print()

    # SECURITY: API keys must never be hardcoded in source control.
    # The previous hardcoded fallback key has been removed; rotate/revoke
    # any key that was ever committed. Environment variable only.
    api_key = os.environ.get('ANTHROPIC_API_KEY')
    if not api_key:
        print("[SKIP] No API key available")
        print()
        print("To test API integration:")
        print(" 1. Set environment variable: export ANTHROPIC_API_KEY='sk-ant-...'")
        print()
        return False

    print("[INFO] Using API key from environment variable")
    print(f"[OK] API key found (length: {len(api_key)} chars)")
    print()

    # Force API mode (use_claude_code=False) so the key is actually exercised.
    print("Initializing LLMWorkflowAnalyzer with API...")
    analyzer = LLMWorkflowAnalyzer(api_key=api_key, use_claude_code=False)
    print("[OK] Analyzer initialized")
    print()

    # Smallest plausible request — just enough to prove authentication.
    minimal_request = "Extract displacement from OP2 file"
    print("Sending minimal test request to API...")
    print(f'Request: "{minimal_request}"')
    print("(This uses minimal tokens to verify authentication)")
    print()

    try:
        # Only the network call lives in the try; reporting cannot raise.
        workflow = analyzer.analyze_request(minimal_request)
    except Exception as e:
        # Broad catch is deliberate for a verification script: any failure
        # (bad key, network, service outage) should be reported, not crash.
        print()
        print("=" * 80)
        print("[FAILED] API Request Failed")
        print("=" * 80)
        print()
        print(f"Error: {e}")
        print()
        print("Possible causes:")
        print(" - Invalid API key")
        print(" - Network connectivity issue")
        print(" - Anthropic API service issue")
        print()
        return False

    print("=" * 80)
    print("[SUCCESS] API Response Received!")
    print("=" * 80)
    print()

    # Sanity-check the parsed workflow; missing key falls back to [].
    eng_features = workflow.get('engineering_features', [])
    print(f"Engineering Features Detected: {len(eng_features)}")
    for i, feature in enumerate(eng_features, 1):
        print(f" {i}. {feature.get('action')}: {feature.get('description', 'N/A')}")
    print()
    print("[OK] API is working correctly!")
    print("[OK] LLMWorkflowAnalyzer can parse natural language requests")
    print()
    print("Credits used: ~100-200 tokens (minimal)")
    print()
    return True
def main():
    """Run the API verification test and print the recommended next steps.

    Returns:
        bool: result of test_api_connection(), for use as an exit status.
    """
    bar = "=" * 80

    print()
    ok = test_api_connection()
    print()

    # Select the report as data, emit it in one shot — output is identical
    # to printing line by line.
    if ok:
        report = [
            bar,
            "RECOMMENDATION",
            bar,
            "",
            "API is working! For development:",
            " - Use Claude Code for daily development (no credits)",
            " - Run this test periodically to verify API still works",
            " - Use API mode only when needed for production testing",
            "",
            "To use API mode:",
            ' python run_optimization.py --llm "request" --api-key "$ANTHROPIC_API_KEY"',
            "",
        ]
    else:
        report = [
            bar,
            "FALLBACK",
            bar,
            "",
            "API not configured, but that's OK!",
            " - Continue using Claude Code for all development",
            " - Hybrid approach works perfectly (Claude Code → JSON → Runner)",
            " - Set up API key later when needed",
            "",
        ]
    print("\n".join(report))
    return ok
if __name__ == '__main__':
    # Exit status mirrors the test result (0 = success) so CI can consume it.
    sys.exit(0 if main() else 1)