- Add embedded Claude Code terminal with xterm.js for full CLI experience - Create WebSocket PTY backend for real-time terminal communication - Add terminal status endpoint to check CLI availability - Update dashboard to use Claude Code terminal instead of API chat - Add optimization control panel with start/stop/validate actions - Add study context provider for global state management - Update frontend with new dependencies (xterm.js addons) - Comprehensive README documentation for all new features 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
277 lines
7.9 KiB
Python
277 lines
7.9 KiB
Python
"""
|
|
Claude Chat API Routes
|
|
|
|
Provides endpoints for AI-powered chat within the Atomizer dashboard.
|
|
"""
|
|
|
|
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
|
|
from fastapi.responses import StreamingResponse
|
|
from pydantic import BaseModel
|
|
from typing import Optional, List, Dict, Any
|
|
import json
|
|
import asyncio
|
|
import os
|
|
|
|
# Router for all Claude chat endpoints; mounted by the main application.
router = APIRouter()

# Check for API key
# Read once at import time; endpoints degrade gracefully (503 / error
# payloads) when it is unset rather than failing at import.
ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY")
|
|
|
|
|
|
class ChatMessage(BaseModel):
    """A single turn in a chat conversation."""

    role: str  # "user" or "assistant"
    content: str  # plain-text body of the turn
|
|
|
|
|
|
class ChatRequest(BaseModel):
    """Request payload for the /chat and /chat/stream endpoints."""

    message: str  # new user message to send to Claude
    study_id: Optional[str] = None  # study whose context the agent should load
    # Prior turns; endpoints keep only entries whose 'content' is a plain string.
    conversation_history: Optional[List[Dict[str, Any]]] = None
|
|
|
|
|
|
class ChatResponse(BaseModel):
    """Response payload returned by the non-streaming /chat endpoint."""

    response: str  # Claude's text reply
    tool_calls: Optional[List[Dict[str, Any]]] = None  # tool invocations made while answering, if any
    study_id: Optional[str] = None  # echoed back from the request
|
|
|
|
|
|
# Store active conversations (in production, use Redis or database)
# NOTE(review): not referenced by any endpoint visible in this file — the
# WebSocket handler keeps per-connection history in a local variable.
# Confirm against the rest of the codebase before removing.
_conversations: Dict[str, List[Dict[str, Any]]] = {}
|
|
|
|
|
|
@router.get("/status")
async def get_claude_status():
    """
    Report whether the Claude API is configured.

    Returns:
        JSON with an ``available`` flag and a human-readable message.
    """
    if ANTHROPIC_API_KEY:
        return {"available": True, "message": "Claude API is configured"}
    return {"available": False, "message": "ANTHROPIC_API_KEY not set"}
|
|
|
|
|
|
@router.post("/chat", response_model=ChatResponse)
async def chat_with_claude(request: ChatRequest):
    """
    Send a message to Claude with Atomizer context.

    Args:
        request: ChatRequest carrying the user message, an optional
            study_id, and prior conversation history.

    Returns:
        ChatResponse with Claude's reply and any tool calls made.

    Raises:
        HTTPException: 503 when the API key or SDK is missing,
            500 on any other failure.
    """
    if not ANTHROPIC_API_KEY:
        raise HTTPException(
            status_code=503,
            detail="Claude API not configured. Set ANTHROPIC_API_KEY environment variable."
        )

    try:
        # Imported lazily so this module loads even when the anthropic
        # SDK is not installed.
        from api.services.claude_agent import AtomizerClaudeAgent

        agent = AtomizerClaudeAgent(study_id=request.study_id)

        # Keep only plain-text turns; complex message formats are skipped
        # for simplicity.
        history = [
            msg
            for msg in (request.conversation_history or [])
            if isinstance(msg.get('content'), str)
        ]

        result = await agent.chat(request.message, history)

        return ChatResponse(
            response=result["response"],
            tool_calls=result.get("tool_calls"),
            study_id=request.study_id,
        )

    except ImportError as e:
        raise HTTPException(
            status_code=503,
            detail=f"Anthropic SDK not installed: {str(e)}"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Chat error: {str(e)}"
        )
|
|
|
|
|
|
@router.post("/chat/stream")
async def chat_stream(request: ChatRequest):
    """
    Stream a response from Claude token by token.

    Args:
        request: ChatRequest with the message and optional context.

    Returns:
        StreamingResponse emitting ``text/event-stream`` frames:
        ``{"token": ...}`` per token, then ``{"done": true}``, or
        ``{"error": ...}`` on failure.
    """
    if not ANTHROPIC_API_KEY:
        raise HTTPException(
            status_code=503,
            detail="Claude API not configured. Set ANTHROPIC_API_KEY environment variable."
        )

    def sse(payload):
        # Encode one payload as a server-sent-events data frame.
        return f"data: {json.dumps(payload)}\n\n"

    async def generate():
        try:
            # Lazy import so the module loads without the anthropic SDK.
            from api.services.claude_agent import AtomizerClaudeAgent

            agent = AtomizerClaudeAgent(study_id=request.study_id)

            # Keep only plain-text turns from the supplied history.
            history = [
                msg
                for msg in (request.conversation_history or [])
                if isinstance(msg.get('content'), str)
            ]

            async for token in agent.chat_stream(request.message, history):
                yield sse({'token': token})

            yield sse({'done': True})

        except Exception as e:
            # Headers are already sent, so failures can only be
            # reported in-band as an error event.
            yield sse({'error': str(e)})

    return StreamingResponse(
        generate(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
        },
    )
|
|
|
|
|
|
@router.websocket("/chat/ws")
async def websocket_chat(websocket: WebSocket):
    """
    WebSocket endpoint for real-time chat.

    Message format (client -> server):
        {"type": "message", "content": "user message", "study_id": "optional"}
        {"type": "clear"}

    Message format (server -> client):
        {"type": "response", "content": "...", "tool_calls": [...]}
        {"type": "cleared"}
        {"type": "error", "message": "..."}
    """
    await websocket.accept()

    # Refuse the session up front when no API key is configured.
    if not ANTHROPIC_API_KEY:
        await websocket.send_json({
            "type": "error",
            "message": "Claude API not configured. Set ANTHROPIC_API_KEY environment variable."
        })
        await websocket.close()
        return

    # Per-connection history (simplified — user/assistant text only).
    conversation_history = []

    try:
        # Lazy import so the module loads without the anthropic SDK.
        from api.services.claude_agent import AtomizerClaudeAgent

        while True:
            # Receive message from client
            data = await websocket.receive_json()

            if data.get("type") == "message":
                content = data.get("content", "")
                study_id = data.get("study_id")

                # Ignore empty messages instead of round-tripping them.
                if not content:
                    continue

                # Fresh agent per message so study context can change
                # mid-session.
                agent = AtomizerClaudeAgent(study_id=study_id)

                try:
                    # Use non-streaming chat for tool support
                    result = await agent.chat(content, conversation_history)

                    await websocket.send_json({
                        "type": "response",
                        "content": result["response"],
                        "tool_calls": result.get("tool_calls", [])
                    })

                    # Update history (simplified - just user/assistant text)
                    conversation_history.append({"role": "user", "content": content})
                    conversation_history.append({"role": "assistant", "content": result["response"]})

                except Exception as e:
                    # Report per-message failures without dropping the socket.
                    await websocket.send_json({
                        "type": "error",
                        "message": str(e)
                    })

            elif data.get("type") == "clear":
                # Clear conversation history
                conversation_history = []
                await websocket.send_json({"type": "cleared"})

    except WebSocketDisconnect:
        # Client went away; nothing to clean up.
        pass
    except Exception as e:
        # Best-effort error report — the socket may already be closed.
        # BUGFIX: was a bare `except:`, which would also trap
        # KeyboardInterrupt/SystemExit/CancelledError; only swallow
        # ordinary send failures here.
        try:
            await websocket.send_json({
                "type": "error",
                "message": str(e)
            })
        except Exception:
            pass
|
|
|
|
|
|
@router.get("/suggestions")
async def get_chat_suggestions(study_id: Optional[str] = None):
    """
    Get contextual chat suggestions based on the current study.

    Args:
        study_id: Optional study to get suggestions for.

    Returns:
        Dict with a "suggestions" list of prompt strings.
    """
    base_suggestions = [
        "What's the status of my optimization?",
        "Show me the best designs found",
        "Compare the top 3 trials",
        "What parameters have the most impact?",
        "Explain the convergence behavior"
    ]

    if study_id:
        # Study-specific prompts when a study is selected.
        contextual = [
            f"Summarize the {study_id} study",
            "What's the current best objective value?",
            "Are there any failed trials? Why?",
            "Show parameter sensitivity analysis",
            "What should I try next to improve results?"
        ]
    else:
        # Getting-started prompts when no study is selected.
        contextual = [
            "List all available studies",
            "Help me create a new study",
            "What can you help me with?"
        ]

    # Always pad with the first three generic prompts.
    return {"suggestions": contextual + base_suggestions[:3]}
|