feat: Add Claude Code terminal integration to dashboard

- Add embedded Claude Code terminal with xterm.js for full CLI experience
- Create WebSocket PTY backend for real-time terminal communication
- Add terminal status endpoint to check CLI availability
- Update dashboard to use Claude Code terminal instead of API chat
- Add optimization control panel with start/stop/validate actions
- Add study context provider for global state management
- Update frontend with new dependencies (xterm.js addons)
- Comprehensive README documentation for all new features

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Antoine
2025-12-04 15:02:13 -05:00
parent 8cbdbcad78
commit 9eed4d81eb
23 changed files with 5060 additions and 339 deletions

View File

@@ -12,7 +12,7 @@ import sys
# Add parent directory to path to import optimization_engine
sys.path.append(str(Path(__file__).parent.parent.parent.parent))
from api.routes import optimization
from api.routes import optimization, claude, terminal
from api.websocket import optimization_stream
# Create FastAPI app
@@ -34,6 +34,8 @@ app.add_middleware(
# Include routers
app.include_router(optimization.router, prefix="/api/optimization", tags=["optimization"])
app.include_router(optimization_stream.router, prefix="/api/ws", tags=["websocket"])
app.include_router(claude.router, prefix="/api/claude", tags=["claude"])
app.include_router(terminal.router, prefix="/api/terminal", tags=["terminal"])
@app.get("/")
async def root():

View File

@@ -0,0 +1,276 @@
"""
Claude Chat API Routes
Provides endpoints for AI-powered chat within the Atomizer dashboard.
"""
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import Optional, List, Dict, Any
import json
import asyncio
import os
router = APIRouter()
# Check for API key
ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY")
class ChatMessage(BaseModel):
    """A single turn in a Claude conversation."""
    role: str  # either "user" or "assistant"
    content: str
class ChatRequest(BaseModel):
    """Request payload for the /chat and /chat/stream endpoints."""
    message: str
    study_id: Optional[str] = None
    conversation_history: Optional[List[Dict[str, Any]]] = None
class ChatResponse(BaseModel):
    """Response body returned by the /chat endpoint."""
    response: str
    tool_calls: Optional[List[Dict[str, Any]]] = None
    study_id: Optional[str] = None


# In-memory conversation store (in production, use Redis or a database)
_conversations: Dict[str, List[Dict[str, Any]]] = {}
@router.get("/status")
async def get_claude_status():
    """
    Check if Claude API is configured and available

    Returns:
        JSON with API status
    """
    if ANTHROPIC_API_KEY:
        return {"available": True, "message": "Claude API is configured"}
    return {"available": False, "message": "ANTHROPIC_API_KEY not set"}
@router.post("/chat", response_model=ChatResponse)
async def chat_with_claude(request: ChatRequest):
    """
    Send a message to Claude with Atomizer context

    Args:
        request: ChatRequest with message, optional study_id, and conversation history

    Returns:
        ChatResponse with Claude's response and any tool calls made
    """
    if not ANTHROPIC_API_KEY:
        raise HTTPException(
            status_code=503,
            detail="Claude API not configured. Set ANTHROPIC_API_KEY environment variable."
        )
    try:
        # Deferred import so this module still loads when anthropic is absent
        from api.services.claude_agent import AtomizerClaudeAgent

        agent = AtomizerClaudeAgent(study_id=request.study_id)

        # Forward only plain-text turns; structured message content is dropped
        prior_turns = [
            turn for turn in (request.conversation_history or [])
            if isinstance(turn.get('content'), str)
        ]

        result = await agent.chat(request.message, prior_turns)
        return ChatResponse(
            response=result["response"],
            tool_calls=result.get("tool_calls"),
            study_id=request.study_id,
        )
    except ImportError as e:
        raise HTTPException(
            status_code=503,
            detail=f"Anthropic SDK not installed: {str(e)}"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Chat error: {str(e)}"
        )
@router.post("/chat/stream")
async def chat_stream(request: ChatRequest):
    """
    Stream a response from Claude token by token

    Args:
        request: ChatRequest with message and optional context

    Returns:
        StreamingResponse with text/event-stream
    """
    if not ANTHROPIC_API_KEY:
        raise HTTPException(
            status_code=503,
            detail="Claude API not configured. Set ANTHROPIC_API_KEY environment variable."
        )

    async def generate():
        # Server-sent-events generator; errors are reported in-band.
        try:
            from api.services.claude_agent import AtomizerClaudeAgent

            agent = AtomizerClaudeAgent(study_id=request.study_id)
            prior_turns = [
                turn for turn in (request.conversation_history or [])
                if isinstance(turn.get('content'), str)
            ]
            async for token in agent.chat_stream(request.message, prior_turns):
                yield f"data: {json.dumps({'token': token})}\n\n"
            yield f"data: {json.dumps({'done': True})}\n\n"
        except Exception as e:
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    return StreamingResponse(
        generate(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
        },
    )
@router.websocket("/chat/ws")
async def websocket_chat(websocket: WebSocket):
    """
    WebSocket endpoint for real-time chat

    Message format (client -> server):
        {"type": "message", "content": "user message", "study_id": "optional"}
        {"type": "clear"}

    Message format (server -> client):
        {"type": "response", "content": "...", "tool_calls": [...]}
        {"type": "cleared"}
        {"type": "error", "message": "..."}
    """
    await websocket.accept()

    if not ANTHROPIC_API_KEY:
        await websocket.send_json({
            "type": "error",
            "message": "Claude API not configured. Set ANTHROPIC_API_KEY environment variable."
        })
        await websocket.close()
        return

    # Per-connection history; grows for the lifetime of the socket.
    conversation_history = []
    try:
        from api.services.claude_agent import AtomizerClaudeAgent

        while True:
            # Receive message from client
            data = await websocket.receive_json()
            msg_type = data.get("type")

            if msg_type == "message":
                content = data.get("content", "")
                study_id = data.get("study_id")
                if not content:
                    continue

                # Fresh agent per message so study context is always current
                agent = AtomizerClaudeAgent(study_id=study_id)
                try:
                    # Non-streaming chat is used here for tool-call support
                    result = await agent.chat(content, conversation_history)
                    await websocket.send_json({
                        "type": "response",
                        "content": result["response"],
                        "tool_calls": result.get("tool_calls", [])
                    })
                    # Record only plain user/assistant text in the history
                    conversation_history.append({"role": "user", "content": content})
                    conversation_history.append({"role": "assistant", "content": result["response"]})
                except Exception as e:
                    await websocket.send_json({
                        "type": "error",
                        "message": str(e)
                    })
            elif msg_type == "clear":
                # Reset conversation history on client request
                conversation_history = []
                await websocket.send_json({"type": "cleared"})
    except WebSocketDisconnect:
        pass
    except Exception as e:
        # Best-effort error report; the socket may already be closed.
        try:
            await websocket.send_json({
                "type": "error",
                "message": str(e)
            })
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            pass
@router.get("/suggestions")
async def get_chat_suggestions(study_id: Optional[str] = None):
    """
    Get contextual chat suggestions based on current study

    Args:
        study_id: Optional study to get suggestions for

    Returns:
        List of suggested prompts
    """
    base_suggestions = [
        "What's the status of my optimization?",
        "Show me the best designs found",
        "Compare the top 3 trials",
        "What parameters have the most impact?",
        "Explain the convergence behavior"
    ]
    # Without a study, steer the user toward discovery/creation prompts.
    if not study_id:
        return {
            "suggestions": [
                "List all available studies",
                "Help me create a new study",
                "What can you help me with?"
            ] + base_suggestions[:3]
        }
    # Study selected: lead with study-specific prompts.
    return {
        "suggestions": [
            f"Summarize the {study_id} study",
            "What's the current best objective value?",
            "Are there any failed trials? Why?",
            "Show parameter sensitivity analysis",
            "What should I try next to improve results?"
        ] + base_suggestions[:3]
    }

View File

@@ -5,12 +5,16 @@ Handles study status, history retrieval, and control operations
from fastapi import APIRouter, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse, FileResponse
from pydantic import BaseModel
from pathlib import Path
from typing import List, Dict, Optional
import json
import sys
import sqlite3
import shutil
import subprocess
import psutil
import signal
from datetime import datetime
# Add project root to path
@@ -1024,3 +1028,620 @@ async def get_study_report(study_id: str):
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to read study report: {str(e)}")
# ============================================================================
# Study README and Config Endpoints
# ============================================================================
@router.get("/studies/{study_id}/readme")
async def get_study_readme(study_id: str):
    """
    Get the README.md file content for a study (from 1_setup folder)

    Args:
        study_id: Study identifier

    Returns:
        JSON with the markdown content
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Candidate README locations, checked in order of preference
        candidates = (
            study_dir / "README.md",
            study_dir / "1_setup" / "README.md",
            study_dir / "readme.md",
        )
        readme_path = next((p for p in candidates if p.exists()), None)

        if readme_path is not None:
            readme_content = readme_path.read_text(encoding='utf-8')
        else:
            # No README on disk: synthesize one from the study config if present
            config_file = study_dir / "1_setup" / "optimization_config.json"
            if not config_file.exists():
                config_file = study_dir / "optimization_config.json"
            if config_file.exists():
                with open(config_file) as f:
                    config = json.load(f)
                readme_content = f"""# {config.get('study_name', study_id)}
{config.get('description', 'No description available.')}
## Design Variables
{chr(10).join([f"- **{dv['name']}**: {dv.get('min', '?')} - {dv.get('max', '?')} {dv.get('units', '')}" for dv in config.get('design_variables', [])])}
## Objectives
{chr(10).join([f"- **{obj['name']}**: {obj.get('description', '')} ({obj.get('direction', 'minimize')})" for obj in config.get('objectives', [])])}
"""
            else:
                readme_content = f"# {study_id}\n\nNo README or configuration found for this study."

        return {
            "content": readme_content,
            "path": str(readme_path) if readme_path else None,
            "study_id": study_id
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to read README: {str(e)}")
@router.get("/studies/{study_id}/config")
async def get_study_config(study_id: str):
    """
    Get the full optimization_config.json for a study

    Args:
        study_id: Study identifier

    Returns:
        JSON with the complete configuration
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Prefer the 1_setup location, then fall back to the study root
        config_file = study_dir / "1_setup" / "optimization_config.json"
        if not config_file.exists():
            config_file = study_dir / "optimization_config.json"
        if not config_file.exists():
            raise HTTPException(status_code=404, detail=f"Config file not found for study {study_id}")

        with open(config_file) as f:
            loaded = json.load(f)

        return {
            "config": loaded,
            "path": str(config_file),
            "study_id": study_id
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to read config: {str(e)}")
# ============================================================================
# Process Control Endpoints
# ============================================================================
# PIDs of optimization runs we launched, keyed by study_id
_running_processes: Dict[str, int] = {}


def _find_optimization_process(study_id: str) -> Optional[psutil.Process]:
    """Find a running optimization process for a given study"""
    study_dir = STUDIES_DIR / study_id
    for candidate in psutil.process_iter(['pid', 'name', 'cmdline', 'cwd']):
        try:
            argv = candidate.info.get('cmdline') or []
            joined = ' '.join(argv) if argv else ''
            # Match a Python process whose command line runs run_optimization.py
            # for this particular study (by id or by directory path).
            if 'python' not in joined.lower() or 'run_optimization' not in joined:
                continue
            if study_id in joined or str(study_dir) in joined:
                return candidate
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Process vanished or is inaccessible; keep scanning.
            continue
    return None
@router.get("/studies/{study_id}/process")
async def get_process_status(study_id: str):
    """
    Get the process status for a study's optimization run

    Args:
        study_id: Study identifier

    Returns:
        JSON with process status (is_running, pid, iteration counts)
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Check if an optimization process is running for this study
        proc = _find_optimization_process(study_id)
        is_running = proc is not None
        pid = proc.pid if proc else None

        # Get iteration counts from the Optuna database, if present
        results_dir = get_results_dir(study_dir)
        study_db = results_dir / "study.db"
        fea_count = 0
        nn_count = 0
        iteration = None
        if study_db.exists():
            import re
            conn = None
            try:
                conn = sqlite3.connect(str(study_db))
                cursor = conn.cursor()
                # Count FEA trials (main study or studies with "_fea" suffix)
                cursor.execute("""
                    SELECT COUNT(*) FROM trials t
                    JOIN studies s ON t.study_id = s.study_id
                    WHERE t.state = 'COMPLETE'
                    AND (s.study_name LIKE '%_fea' OR s.study_name NOT LIKE '%_nn%')
                """)
                fea_count = cursor.fetchone()[0]
                # Count NN (surrogate) trials
                cursor.execute("""
                    SELECT COUNT(*) FROM trials t
                    JOIN studies s ON t.study_id = s.study_id
                    WHERE t.state = 'COMPLETE'
                    AND s.study_name LIKE '%_nn%'
                """)
                nn_count = cursor.fetchone()[0]
                # Infer the current iteration from the newest "..._iterN" study name
                cursor.execute("""
                    SELECT study_name FROM studies
                    WHERE study_name LIKE '%_iter%'
                    ORDER BY study_name DESC LIMIT 1
                """)
                result = cursor.fetchone()
                if result:
                    match = re.search(r'iter(\d+)', result[0])
                    if match:
                        iteration = int(match.group(1))
            except Exception as e:
                # Best-effort: a broken DB should not fail the status endpoint.
                print(f"Warning: Failed to read database for process status: {e}")
            finally:
                # BUGFIX: the connection was leaked when a query raised; always close.
                if conn is not None:
                    conn.close()

        return {
            "is_running": is_running,
            "pid": pid,
            "iteration": iteration,
            "fea_count": fea_count,
            "nn_count": nn_count,
            "study_id": study_id
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get process status: {str(e)}")
class StartOptimizationRequest(BaseModel):
    """Options accepted by the start-optimization endpoint.

    Field names are camelCase to match the dashboard frontend payload.
    """
    freshStart: bool = False
    maxIterations: int = 100
    feaBatchSize: int = 5
    tuneTrials: int = 30
    ensembleSize: int = 3
    patience: int = 5
@router.post("/studies/{study_id}/start")
async def start_optimization(study_id: str, request: StartOptimizationRequest = None):
    """
    Start the optimization process for a study

    Args:
        study_id: Study identifier
        request: Optional start options

    Returns:
        JSON with process info
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Refuse to double-start: report the existing process instead
        existing_proc = _find_optimization_process(study_id)
        if existing_proc:
            return {
                "success": False,
                "message": f"Optimization already running (PID: {existing_proc.pid})",
                "pid": existing_proc.pid
            }

        run_script = study_dir / "run_optimization.py"
        if not run_script.exists():
            raise HTTPException(status_code=404, detail=f"run_optimization.py not found for study {study_id}")

        # Build command with arguments
        python_exe = sys.executable
        cmd = [python_exe, str(run_script), "--start"]
        if request:
            if request.freshStart:
                cmd.append("--fresh")
            cmd.extend(["--fea-batch", str(request.feaBatchSize)])
            cmd.extend(["--tune-trials", str(request.tuneTrials)])
            cmd.extend(["--ensemble-size", str(request.ensembleSize)])
            cmd.extend(["--patience", str(request.patience)])
            # NOTE(review): request.maxIterations is accepted but never forwarded
            # to the script -- confirm whether run_optimization.py takes such a flag.

        # Start the process in the background, detached from this server.
        # BUGFIX: output was sent to PIPEs that nothing ever read; a chatty
        # long-running child can fill the OS pipe buffer and block forever.
        proc = subprocess.Popen(
            cmd,
            cwd=str(study_dir),
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            start_new_session=True
        )
        _running_processes[study_id] = proc.pid

        return {
            "success": True,
            "message": f"Optimization started successfully",
            "pid": proc.pid,
            "command": ' '.join(cmd)
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to start optimization: {str(e)}")
class StopRequest(BaseModel):
    """Options for the stop endpoint."""
    force: bool = True  # default is an immediate hard kill
@router.post("/studies/{study_id}/stop")
async def stop_optimization(study_id: str, request: StopRequest = None):
    """
    Stop the optimization process for a study (hard kill by default)

    Args:
        study_id: Study identifier
        request.force: If True (default), immediately kill. If False, try graceful first.

    Returns:
        JSON with result
    """
    if request is None:
        request = StopRequest()
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        target = _find_optimization_process(study_id)
        if not target:
            return {
                "success": False,
                "message": "No running optimization process found"
            }

        pid = target.pid
        killed_pids = []
        try:
            # Snapshot the child tree BEFORE touching the parent, otherwise
            # the children become unreachable once the parent is gone.
            try:
                children = target.children(recursive=True)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                children = []

            def _kill_children():
                # Kill bottom-up so parents cannot respawn dying children.
                for child in reversed(children):
                    try:
                        child.kill()  # SIGKILL on Unix, TerminateProcess on Windows
                        killed_pids.append(child.pid)
                    except (psutil.NoSuchProcess, psutil.AccessDenied):
                        pass

            if request.force:
                # Hard kill: children first, then the parent.
                _kill_children()
                try:
                    target.kill()
                    killed_pids.append(pid)
                except psutil.NoSuchProcess:
                    pass
            else:
                # Graceful: SIGTERM with a grace period, then escalate.
                try:
                    target.terminate()
                    target.wait(timeout=5)
                except psutil.TimeoutExpired:
                    _kill_children()
                    target.kill()
                    killed_pids.append(pid)
                except psutil.NoSuchProcess:
                    pass

            # Drop our bookkeeping entry, if any.
            _running_processes.pop(study_id, None)

            return {
                "success": True,
                "message": f"Optimization killed (PID: {pid}, +{len(children)} children)",
                "pid": pid,
                "killed_pids": killed_pids
            }
        except psutil.NoSuchProcess:
            _running_processes.pop(study_id, None)
            return {
                "success": True,
                "message": "Process already terminated"
            }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to stop optimization: {str(e)}")
class ValidateRequest(BaseModel):
    """Options for the final-validation endpoint."""
    topN: int = 5  # how many top NN predictions to re-run with FEA
@router.post("/studies/{study_id}/validate")
async def validate_optimization(study_id: str, request: ValidateRequest = None):
    """
    Run final FEA validation on top NN predictions

    Args:
        study_id: Study identifier
        request: Validation options (topN)

    Returns:
        JSON with process info
    """
    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        # Validation must not race a live optimization run
        if _find_optimization_process(study_id):
            return {
                "success": False,
                "message": "Cannot validate while optimization is running. Stop optimization first."
            }

        python_exe = sys.executable
        top_n = request.topN if request else 5

        # Prefer a dedicated final_validation.py; otherwise fall back to
        # run_optimization.py with the --validate flag.
        validation_script = study_dir / "final_validation.py"
        if validation_script.exists():
            cmd = [python_exe, str(validation_script), "--top", str(top_n)]
        else:
            run_script = study_dir / "run_optimization.py"
            if not run_script.exists():
                raise HTTPException(status_code=404, detail="No validation script found")
            cmd = [python_exe, str(run_script), "--validate", "--top", str(top_n)]

        # Start validation in the background.
        # BUGFIX: output was sent to PIPEs that nothing ever read; a chatty
        # child can fill the OS pipe buffer and block. Discard output instead.
        proc = subprocess.Popen(
            cmd,
            cwd=str(study_dir),
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            start_new_session=True
        )
        return {
            "success": True,
            "message": f"Validation started for top {top_n} NN predictions",
            "pid": proc.pid,
            "command": ' '.join(cmd)
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to start validation: {str(e)}")
# ============================================================================
# Optuna Dashboard Launch
# ============================================================================
# Dashboard processes we spawned, keyed by study_id
_optuna_processes: Dict[str, subprocess.Popen] = {}


@router.post("/studies/{study_id}/optuna-dashboard")
async def launch_optuna_dashboard(study_id: str):
    """
    Launch Optuna dashboard for a specific study

    Args:
        study_id: Study identifier

    Returns:
        JSON with dashboard URL and process info
    """
    import time
    import socket
    import asyncio

    def is_port_in_use(port: int) -> bool:
        """Check if a port is already in use"""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            return s.connect_ex(('localhost', port)) == 0

    def _read_stderr(p: subprocess.Popen) -> str:
        """Best-effort read of a dead child's stderr for error reporting."""
        try:
            return p.stderr.read().decode() if p.stderr else ""
        except Exception:
            # BUGFIX: was a bare `except:`; narrowed to Exception.
            return ""

    try:
        study_dir = STUDIES_DIR / study_id
        if not study_dir.exists():
            raise HTTPException(status_code=404, detail=f"Study {study_id} not found")

        results_dir = get_results_dir(study_dir)
        study_db = results_dir / "study.db"
        if not study_db.exists():
            raise HTTPException(status_code=404, detail=f"No Optuna database found for study {study_id}")

        port = 8081
        # Check if a dashboard is already listening on this port
        if is_port_in_use(port):
            # If it's a process we launched and it's still alive, report it
            if study_id in _optuna_processes:
                proc = _optuna_processes[study_id]
                if proc.poll() is None:  # Still running
                    return {
                        "success": True,
                        "url": f"http://localhost:{port}",
                        "pid": proc.pid,
                        "message": "Optuna dashboard already running"
                    }
            # Port in use but not by us - still return success since dashboard is available
            return {
                "success": True,
                "url": f"http://localhost:{port}",
                "pid": None,
                "message": "Optuna dashboard already running on port 8081"
            }

        # Launch optuna-dashboard via a tiny `python -c` script
        python_exe = sys.executable
        # Use absolute path with POSIX format for SQLite URL
        abs_db_path = study_db.absolute().as_posix()
        storage_url = f"sqlite:///{abs_db_path}"
        launch_script = f'''
from optuna_dashboard import run_server
run_server("{storage_url}", host="0.0.0.0", port={port})
'''
        cmd = [python_exe, "-c", launch_script]

        # stdout is never read, so discard it; stderr stays piped because we
        # surface it in the failure messages below.
        import platform
        if platform.system() == 'Windows':
            # Windows-specific: create a detached process so it survives us
            DETACHED_PROCESS = 0x00000008
            CREATE_NEW_PROCESS_GROUP = 0x00000200
            proc = subprocess.Popen(
                cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.PIPE,
                creationflags=DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP
            )
        else:
            proc = subprocess.Popen(
                cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.PIPE,
                start_new_session=True
            )
        _optuna_processes[study_id] = proc

        # Poll until the dashboard binds the port or the child dies
        max_wait = 5  # seconds
        start_time = time.time()
        while time.time() - start_time < max_wait:
            if is_port_in_use(port):
                return {
                    "success": True,
                    "url": f"http://localhost:{port}",
                    "pid": proc.pid,
                    "message": "Optuna dashboard launched successfully"
                }
            if proc.poll() is not None:
                return {
                    "success": False,
                    "message": f"Failed to start Optuna dashboard: {_read_stderr(proc)}"
                }
            # BUGFIX: was time.sleep(0.5), which blocked the asyncio event loop
            # (and every other request) inside this async endpoint.
            await asyncio.sleep(0.5)

        # Timeout - process might still be starting
        if proc.poll() is None:
            return {
                "success": True,
                "url": f"http://localhost:{port}",
                "pid": proc.pid,
                "message": "Optuna dashboard starting (may take a moment)"
            }
        return {
            "success": False,
            "message": f"Failed to start Optuna dashboard: {_read_stderr(proc)}"
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to launch Optuna dashboard: {str(e)}")

View File

@@ -0,0 +1,289 @@
"""
Terminal WebSocket for Claude Code CLI
Provides a PTY-based terminal that runs Claude Code in the dashboard.
"""
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from typing import Optional
import asyncio
import subprocess
import sys
import os
import signal
import json
router = APIRouter()
# Store active terminal sessions keyed by session id
_terminal_sessions: dict = {}


class TerminalSession:
    """Manages a Claude Code terminal session.

    On Unix the CLI runs under a real pseudo-terminal (pty); on Windows it is
    driven through plain pipes via cmd.exe. Output is pumped to the attached
    WebSocket by a background task.
    """

    def __init__(self, session_id: str, working_dir: str):
        self.session_id = session_id
        self.working_dir = working_dir
        self.process: Optional[subprocess.Popen] = None
        self.websocket: Optional[WebSocket] = None
        self._read_task: Optional[asyncio.Task] = None
        self._running = False
        # PTY master file descriptor (Unix only); None until start() succeeds.
        # BUGFIX: previously never initialized, forcing hasattr() checks below.
        self._master_fd: Optional[int] = None

    async def start(self, websocket: WebSocket):
        """Start the Claude Code process and begin pumping its output."""
        self.websocket = websocket
        self._running = True

        # Determine the claude command.
        # On Windows, claude is typically installed via npm and available in PATH.
        claude_cmd = "claude"
        is_windows = sys.platform == "win32"
        try:
            if is_windows:
                # No PTY support on Windows: run through cmd.exe with raw pipes
                self.process = subprocess.Popen(
                    ["cmd.exe", "/c", claude_cmd],
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    cwd=self.working_dir,
                    bufsize=0,
                    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
                    env={**os.environ, "FORCE_COLOR": "1", "TERM": "xterm-256color"}
                )
            else:
                # Unix: give the CLI a real pseudo-terminal
                import pty
                master_fd, slave_fd = pty.openpty()
                self.process = subprocess.Popen(
                    [claude_cmd],
                    stdin=slave_fd,
                    stdout=slave_fd,
                    stderr=slave_fd,
                    cwd=self.working_dir,
                    env={**os.environ, "TERM": "xterm-256color"}
                )
                os.close(slave_fd)  # parent keeps only the master end
                self._master_fd = master_fd

            # Start pumping output to the WebSocket
            self._read_task = asyncio.create_task(self._read_output())
            await self.websocket.send_json({
                "type": "started",
                "message": f"Claude Code started in {self.working_dir}"
            })
        except FileNotFoundError:
            await self.websocket.send_json({
                "type": "error",
                "message": "Claude Code CLI not found. Please install it with: npm install -g @anthropic-ai/claude-code"
            })
            self._running = False
        except Exception as e:
            await self.websocket.send_json({
                "type": "error",
                "message": f"Failed to start Claude Code: {str(e)}"
            })
            self._running = False

    async def _read_output(self):
        """Pump process output to the WebSocket until the process exits."""
        is_windows = sys.platform == "win32"
        try:
            while self._running and self.process and self.process.poll() is None:
                loop = asyncio.get_event_loop()
                if is_windows:
                    # Read from the stdout pipe without blocking the event loop
                    if self.process.stdout:
                        try:
                            data = await loop.run_in_executor(
                                None,
                                lambda: self.process.stdout.read(1024)
                            )
                            if data:
                                await self.websocket.send_json({
                                    "type": "output",
                                    "data": data.decode("utf-8", errors="replace")
                                })
                        except Exception:
                            break
                else:
                    # Read from the PTY master in a worker thread
                    try:
                        data = await loop.run_in_executor(
                            None,
                            lambda: os.read(self._master_fd, 1024)
                        )
                        if data:
                            await self.websocket.send_json({
                                "type": "output",
                                "data": data.decode("utf-8", errors="replace")
                            })
                    except OSError:
                        # PTY closed (process exited)
                        break
                await asyncio.sleep(0.01)

            # Process ended; report its exit code to the client
            if self.websocket:
                exit_code = self.process.poll() if self.process else -1
                await self.websocket.send_json({
                    "type": "exit",
                    "code": exit_code
                })
        except Exception as e:
            if self.websocket:
                try:
                    await self.websocket.send_json({
                        "type": "error",
                        "message": str(e)
                    })
                except Exception:
                    # BUGFIX: was a bare `except:`; the socket may be closed.
                    pass

    async def write(self, data: str):
        """Forward user input to the process (stdin pipe or PTY master)."""
        if not self.process or not self._running:
            return
        try:
            if sys.platform == "win32":
                if self.process.stdin:
                    self.process.stdin.write(data.encode())
                    self.process.stdin.flush()
            else:
                os.write(self._master_fd, data.encode())
        except Exception as e:
            if self.websocket:
                await self.websocket.send_json({
                    "type": "error",
                    "message": f"Write error: {str(e)}"
                })

    async def resize(self, cols: int, rows: int):
        """Resize the terminal via PTY ioctl (Unix only)."""
        if sys.platform != "win32" and self._master_fd is not None:
            import struct
            import fcntl
            import termios
            winsize = struct.pack("HHHH", rows, cols, 0, 0)
            fcntl.ioctl(self._master_fd, termios.TIOCSWINSZ, winsize)

    async def stop(self):
        """Stop the session: cancel the reader, end the child, close the PTY."""
        self._running = False
        if self._read_task:
            self._read_task.cancel()
            try:
                await self._read_task
            except asyncio.CancelledError:
                pass
        if self.process:
            try:
                if sys.platform == "win32":
                    self.process.terminate()
                else:
                    os.kill(self.process.pid, signal.SIGTERM)
                self.process.wait(timeout=2)
            except Exception:
                # BUGFIX: bare excepts narrowed; escalate to a hard kill
                # if graceful termination failed or timed out.
                try:
                    self.process.kill()
                except Exception:
                    pass
        if sys.platform != "win32" and self._master_fd is not None:
            try:
                os.close(self._master_fd)
            except OSError:
                pass
@router.websocket("/claude")
async def claude_terminal(websocket: WebSocket, working_dir: str = None):
    """
    WebSocket endpoint for Claude Code terminal.

    Query params:
        working_dir: Directory to start Claude Code in (defaults to Atomizer root)

    Client -> Server messages:
        {"type": "input", "data": "user input text"}
        {"type": "resize", "cols": 80, "rows": 24}
        {"type": "stop"}

    Server -> Client messages:
        {"type": "started", "message": "..."}
        {"type": "output", "data": "terminal output"}
        {"type": "exit", "code": 0}
        {"type": "error", "message": "..."}
    """
    await websocket.accept()

    # Default to the Atomizer root (five directory levels above this file)
    if not working_dir:
        path = os.path.abspath(__file__)
        for _ in range(5):
            path = os.path.dirname(path)
        working_dir = str(path)

    # Register a fresh session for this socket
    session_id = f"claude-{id(websocket)}"
    session = TerminalSession(session_id, working_dir)
    _terminal_sessions[session_id] = session

    try:
        # Launch Claude Code, then relay client messages until disconnect/stop
        await session.start(websocket)
        while session._running:
            try:
                message = await websocket.receive_json()
                kind = message.get("type")
                if kind == "input":
                    await session.write(message.get("data", ""))
                elif kind == "resize":
                    await session.resize(
                        message.get("cols", 80),
                        message.get("rows", 24)
                    )
                elif kind == "stop":
                    break
            except WebSocketDisconnect:
                break
            except Exception as e:
                await websocket.send_json({
                    "type": "error",
                    "message": str(e)
                })
    finally:
        # Always tear down the child process and drop the session entry
        await session.stop()
        _terminal_sessions.pop(session_id, None)
@router.get("/status")
async def terminal_status():
    """Check if Claude Code CLI is available."""
    import shutil
    claude_path = shutil.which("claude")
    if claude_path:
        message = "Claude Code CLI is available"
    else:
        message = "Claude Code CLI not found. Install with: npm install -g @anthropic-ai/claude-code"
    return {
        "available": claude_path is not None,
        "path": claude_path,
        "message": message
    }

View File

@@ -0,0 +1,7 @@
"""
Atomizer Dashboard Services
"""
from .claude_agent import AtomizerClaudeAgent
__all__ = ['AtomizerClaudeAgent']

View File

@@ -0,0 +1,715 @@
"""
Atomizer Claude Agent Service
Provides Claude AI integration with Atomizer-specific tools for:
- Analyzing optimization results
- Querying trial data
- Modifying configurations
- Creating new studies
- Explaining FEA/Zernike concepts
"""
import os
import json
import sqlite3
from pathlib import Path
from typing import Optional, List, Dict, Any, AsyncGenerator
from datetime import datetime
import anthropic
# Base studies directory
STUDIES_DIR = Path(__file__).parent.parent.parent.parent.parent / "studies"
ATOMIZER_ROOT = Path(__file__).parent.parent.parent.parent.parent
class AtomizerClaudeAgent:
"""Claude agent with Atomizer-specific tools and context"""
def __init__(self, study_id: Optional[str] = None):
self.client = anthropic.Anthropic()
self.study_id = study_id
self.study_dir = STUDIES_DIR / study_id if study_id else None
self.tools = self._define_tools()
self.system_prompt = self._build_system_prompt()
def _build_system_prompt(self) -> str:
"""Build context-aware system prompt for Atomizer"""
base_prompt = """You are Claude Code embedded in the Atomizer FEA optimization dashboard.
## Your Role
You help engineers with structural optimization using NX Nastran simulations. You can:
1. **Analyze Results** - Interpret optimization progress, identify trends, explain convergence
2. **Query Data** - Fetch trial data, compare configurations, find best designs
3. **Modify Settings** - Update design variable bounds, objectives, constraints
4. **Explain Concepts** - FEA, Zernike polynomials, wavefront error, stress analysis
5. **Troubleshoot** - Debug failed trials, identify issues, suggest fixes
## Atomizer Context
- Atomizer uses Optuna for Bayesian optimization
- Studies can use FEA-only or hybrid FEA/Neural surrogate approaches
- Results are stored in SQLite databases (study.db)
- Design variables are NX expressions in CAD models
- Objectives include stress, displacement, frequency, Zernike WFE
## Guidelines
- Be concise but thorough
- Use technical language appropriate for engineers
- When showing data, format it clearly (tables, lists)
- If uncertain, say so and suggest how to verify
- Proactively suggest next steps or insights
"""
# Add study-specific context if available
if self.study_id and self.study_dir and self.study_dir.exists():
context = self._get_study_context()
base_prompt += f"\n## Current Study: {self.study_id}\n{context}\n"
else:
base_prompt += "\n## Current Study: None selected\nAsk the user to select a study or help them create a new one.\n"
return base_prompt
    def _get_study_context(self) -> str:
        """Summarize the current study for the system prompt.

        Best-effort: reads the config JSON (design variables, objectives,
        constraints) and the Optuna SQLite database (trial count, best value).
        Any unreadable file or database is silently skipped so prompt
        construction never fails.
        """
        context_parts = []
        # Try to load config.
        # Newer studies keep it under 1_setup/; older ones at the study root.
        config_path = self.study_dir / "1_setup" / "optimization_config.json"
        if not config_path.exists():
            config_path = self.study_dir / "optimization_config.json"
        if config_path.exists():
            try:
                with open(config_path) as f:
                    config = json.load(f)
                # Design variables (only the first 5 names, to keep it short)
                dvs = config.get('design_variables', [])
                if dvs:
                    context_parts.append(f"**Design Variables ({len(dvs)})**: " +
                                         ", ".join(dv['name'] for dv in dvs[:5]) +
                                         ("..." if len(dvs) > 5 else ""))
                # Objectives with their optimization direction
                objs = config.get('objectives', [])
                if objs:
                    context_parts.append(f"**Objectives ({len(objs)})**: " +
                                         ", ".join(f"{o['name']} ({o.get('direction', 'minimize')})"
                                                   for o in objs))
                # Constraints (names only)
                constraints = config.get('constraints', [])
                if constraints:
                    context_parts.append(f"**Constraints**: " +
                                         ", ".join(c['name'] for c in constraints))
            except Exception:
                # Malformed/unreadable config: omit that section.
                pass
        # Try to get trial count from database.
        # Two historical results-directory layouts are checked.
        results_dir = self.study_dir / "2_results"
        if not results_dir.exists():
            results_dir = self.study_dir / "3_results"
        db_path = results_dir / "study.db" if results_dir.exists() else None
        if db_path and db_path.exists():
            try:
                conn = sqlite3.connect(str(db_path))
                cursor = conn.cursor()
                cursor.execute("SELECT COUNT(*) FROM trials WHERE state='COMPLETE'")
                trial_count = cursor.fetchone()[0]
                context_parts.append(f"**Completed Trials**: {trial_count}")
                # Get best value (MIN: minimization is assumed throughout)
                cursor.execute("""
                    SELECT MIN(value) FROM trial_values
                    WHERE trial_id IN (SELECT trial_id FROM trials WHERE state='COMPLETE')
                """)
                best = cursor.fetchone()[0]
                if best is not None:
                    context_parts.append(f"**Best Objective**: {best:.6f}")
                conn.close()
            except Exception:
                # Missing/locked database: omit the trial statistics.
                pass
        return "\n".join(context_parts) if context_parts else "No configuration found."
    def _define_tools(self) -> List[Dict[str, Any]]:
        """Define Atomizer-specific tools for Claude.

        Returns:
            Tool specifications in the Anthropic Messages API ``tools`` format
            (name, description, JSON-schema ``input_schema``). Each ``name``
            must match a branch in ``_execute_tool``.
        """
        return [
            # Read optimization_config.json for a study.
            {
                "name": "read_study_config",
                "description": "Read the optimization configuration for the current or specified study. Returns design variables, objectives, constraints, and algorithm settings.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "study_id": {
                            "type": "string",
                            "description": "Study ID to read config from. Uses current study if not specified."
                        }
                    },
                    "required": []
                }
            },
            # Tabular query over the Optuna trials database.
            {
                "name": "query_trials",
                "description": "Query trial data from the Optuna database. Can filter by state, source (FEA/NN), objective value range, or parameter values.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "study_id": {
                            "type": "string",
                            "description": "Study ID to query. Uses current study if not specified."
                        },
                        "state": {
                            "type": "string",
                            "enum": ["COMPLETE", "PRUNED", "FAIL", "RUNNING", "all"],
                            "description": "Filter by trial state. Default: COMPLETE"
                        },
                        "source": {
                            "type": "string",
                            "enum": ["fea", "nn", "all"],
                            "description": "Filter by trial source (FEA simulation or Neural Network). Default: all"
                        },
                        "limit": {
                            "type": "integer",
                            "description": "Maximum number of trials to return. Default: 20"
                        },
                        "order_by": {
                            "type": "string",
                            "enum": ["value_asc", "value_desc", "trial_id_asc", "trial_id_desc"],
                            "description": "Sort order. Default: value_asc (best first)"
                        }
                    },
                    "required": []
                }
            },
            # Full detail dump for one trial.
            {
                "name": "get_trial_details",
                "description": "Get detailed information about a specific trial including all parameters, objective values, and user attributes.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "study_id": {
                            "type": "string",
                            "description": "Study ID. Uses current study if not specified."
                        },
                        "trial_id": {
                            "type": "integer",
                            "description": "The trial number to get details for."
                        }
                    },
                    "required": ["trial_id"]
                }
            },
            # Side-by-side comparison table of 2-5 trials.
            {
                "name": "compare_trials",
                "description": "Compare two or more trials side-by-side, showing parameter differences and objective values.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "study_id": {
                            "type": "string",
                            "description": "Study ID. Uses current study if not specified."
                        },
                        "trial_ids": {
                            "type": "array",
                            "items": {"type": "integer"},
                            "description": "List of trial IDs to compare (2-5 trials)."
                        }
                    },
                    "required": ["trial_ids"]
                }
            },
            # High-level progress summary (counts, best design, sources).
            {
                "name": "get_optimization_summary",
                "description": "Get a high-level summary of the optimization progress including trial counts, convergence status, best designs, and parameter sensitivity.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "study_id": {
                            "type": "string",
                            "description": "Study ID. Uses current study if not specified."
                        }
                    },
                    "required": []
                }
            },
            # Fetch the study's README.md documentation.
            {
                "name": "read_study_readme",
                "description": "Read the README.md documentation for a study, which contains the engineering problem description, mathematical formulation, and methodology.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "study_id": {
                            "type": "string",
                            "description": "Study ID. Uses current study if not specified."
                        }
                    },
                    "required": []
                }
            },
            # Enumerate all study directories with status/trial counts.
            {
                "name": "list_studies",
                "description": "List all available optimization studies with their status and trial counts.",
                "input_schema": {
                    "type": "object",
                    "properties": {},
                    "required": []
                }
            }
        ]
def _execute_tool(self, tool_name: str, tool_input: Dict[str, Any]) -> str:
"""Execute an Atomizer tool and return the result"""
try:
if tool_name == "read_study_config":
return self._tool_read_config(tool_input.get('study_id'))
elif tool_name == "query_trials":
return self._tool_query_trials(tool_input)
elif tool_name == "get_trial_details":
return self._tool_get_trial_details(tool_input)
elif tool_name == "compare_trials":
return self._tool_compare_trials(tool_input)
elif tool_name == "get_optimization_summary":
return self._tool_get_summary(tool_input.get('study_id'))
elif tool_name == "read_study_readme":
return self._tool_read_readme(tool_input.get('study_id'))
elif tool_name == "list_studies":
return self._tool_list_studies()
else:
return f"Unknown tool: {tool_name}"
except Exception as e:
return f"Error executing {tool_name}: {str(e)}"
def _get_study_dir(self, study_id: Optional[str]) -> Path:
"""Get study directory, using current study if not specified"""
sid = study_id or self.study_id
if not sid:
raise ValueError("No study specified and no current study selected")
study_dir = STUDIES_DIR / sid
if not study_dir.exists():
raise ValueError(f"Study '{sid}' not found")
return study_dir
def _get_db_path(self, study_id: Optional[str]) -> Path:
"""Get database path for a study"""
study_dir = self._get_study_dir(study_id)
for results_dir_name in ["2_results", "3_results"]:
db_path = study_dir / results_dir_name / "study.db"
if db_path.exists():
return db_path
raise ValueError(f"No database found for study")
    def _tool_read_config(self, study_id: Optional[str]) -> str:
        """Read a study's optimization_config.json and render it as Markdown.

        Args:
            study_id: Study to read; falls back to the current study.

        Returns:
            Markdown tables for design variables, objectives and constraints,
            or a short message if no configuration file exists.
        """
        study_dir = self._get_study_dir(study_id)
        # Newer studies keep the config under 1_setup/; older ones at the root.
        config_path = study_dir / "1_setup" / "optimization_config.json"
        if not config_path.exists():
            config_path = study_dir / "optimization_config.json"
        if not config_path.exists():
            return "No configuration file found for this study."
        with open(config_path) as f:
            config = json.load(f)
        # Format nicely as Markdown sections
        result = [f"# Configuration for {study_id or self.study_id}\n"]
        # Design variables table (missing fields rendered as '-')
        dvs = config.get('design_variables', [])
        if dvs:
            result.append("## Design Variables")
            result.append("| Name | Min | Max | Baseline | Units |")
            result.append("|------|-----|-----|----------|-------|")
            for dv in dvs:
                result.append(f"| {dv['name']} | {dv.get('min', '-')} | {dv.get('max', '-')} | {dv.get('baseline', '-')} | {dv.get('units', '-')} |")
        # Objectives table
        objs = config.get('objectives', [])
        if objs:
            result.append("\n## Objectives")
            result.append("| Name | Direction | Weight | Target | Units |")
            result.append("|------|-----------|--------|--------|-------|")
            for obj in objs:
                result.append(f"| {obj['name']} | {obj.get('direction', 'minimize')} | {obj.get('weight', 1.0)} | {obj.get('target', '-')} | {obj.get('units', '-')} |")
        # Constraints as a bulleted list (max_value preferred over min_value)
        constraints = config.get('constraints', [])
        if constraints:
            result.append("\n## Constraints")
            for c in constraints:
                result.append(f"- **{c['name']}**: {c.get('type', 'bound')} {c.get('max_value', c.get('min_value', ''))} {c.get('units', '')}")
        return "\n".join(result)
def _tool_query_trials(self, params: Dict[str, Any]) -> str:
"""Query trials from database"""
db_path = self._get_db_path(params.get('study_id'))
state = params.get('state', 'COMPLETE')
source = params.get('source', 'all')
limit = params.get('limit', 20)
order_by = params.get('order_by', 'value_asc')
conn = sqlite3.connect(str(db_path))
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
# Build query
query = """
SELECT t.trial_id, t.state, tv.value,
GROUP_CONCAT(tp.param_name || '=' || ROUND(tp.param_value, 4), ', ') as params
FROM trials t
LEFT JOIN trial_values tv ON t.trial_id = tv.trial_id
LEFT JOIN trial_params tp ON t.trial_id = tp.trial_id
"""
conditions = []
if state != 'all':
conditions.append(f"t.state = '{state}'")
if conditions:
query += " WHERE " + " AND ".join(conditions)
query += " GROUP BY t.trial_id"
# Order
if order_by == 'value_asc':
query += " ORDER BY tv.value ASC"
elif order_by == 'value_desc':
query += " ORDER BY tv.value DESC"
elif order_by == 'trial_id_desc':
query += " ORDER BY t.trial_id DESC"
else:
query += " ORDER BY t.trial_id ASC"
query += f" LIMIT {limit}"
cursor.execute(query)
rows = cursor.fetchall()
conn.close()
if not rows:
return "No trials found matching the criteria."
# Filter by source if needed (check user_attrs)
if source != 'all':
# Would need another query to filter by trial_source attr
pass
# Format results
result = [f"# Trials (showing {len(rows)}/{limit} max)\n"]
result.append("| Trial | State | Objective | Parameters |")
result.append("|-------|-------|-----------|------------|")
for row in rows:
value = f"{row['value']:.6f}" if row['value'] else "N/A"
params = row['params'][:50] + "..." if row['params'] and len(row['params']) > 50 else (row['params'] or "")
result.append(f"| {row['trial_id']} | {row['state']} | {value} | {params} |")
return "\n".join(result)
def _tool_get_trial_details(self, params: Dict[str, Any]) -> str:
"""Get detailed trial information"""
db_path = self._get_db_path(params.get('study_id'))
trial_id = params['trial_id']
conn = sqlite3.connect(str(db_path))
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
# Get trial info
cursor.execute("SELECT * FROM trials WHERE trial_id = ?", (trial_id,))
trial = cursor.fetchone()
if not trial:
conn.close()
return f"Trial {trial_id} not found."
result = [f"# Trial {trial_id} Details\n"]
result.append(f"**State**: {trial['state']}")
# Get objective value
cursor.execute("SELECT value FROM trial_values WHERE trial_id = ?", (trial_id,))
value_row = cursor.fetchone()
if value_row:
result.append(f"**Objective Value**: {value_row['value']:.6f}")
# Get parameters
cursor.execute("SELECT param_name, param_value FROM trial_params WHERE trial_id = ? ORDER BY param_name", (trial_id,))
params_rows = cursor.fetchall()
if params_rows:
result.append("\n## Parameters")
result.append("| Parameter | Value |")
result.append("|-----------|-------|")
for p in params_rows:
result.append(f"| {p['param_name']} | {p['param_value']:.6f} |")
# Get user attributes
cursor.execute("SELECT key, value_json FROM trial_user_attributes WHERE trial_id = ?", (trial_id,))
attrs = cursor.fetchall()
if attrs:
result.append("\n## Attributes")
for attr in attrs:
try:
value = json.loads(attr['value_json'])
if isinstance(value, float):
result.append(f"- **{attr['key']}**: {value:.6f}")
else:
result.append(f"- **{attr['key']}**: {value}")
except:
result.append(f"- **{attr['key']}**: {attr['value_json']}")
conn.close()
return "\n".join(result)
def _tool_compare_trials(self, params: Dict[str, Any]) -> str:
"""Compare multiple trials"""
db_path = self._get_db_path(params.get('study_id'))
trial_ids = params['trial_ids']
if len(trial_ids) < 2:
return "Need at least 2 trials to compare."
if len(trial_ids) > 5:
return "Maximum 5 trials for comparison."
conn = sqlite3.connect(str(db_path))
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
result = ["# Trial Comparison\n"]
# Get all parameter names
cursor.execute("SELECT DISTINCT param_name FROM trial_params ORDER BY param_name")
param_names = [row['param_name'] for row in cursor.fetchall()]
# Build comparison table header
header = "| Parameter | " + " | ".join(f"Trial {tid}" for tid in trial_ids) + " |"
separator = "|-----------|" + "|".join("-" * 10 for _ in trial_ids) + "|"
result.append(header)
result.append(separator)
# Objective values row
obj_values = []
for tid in trial_ids:
cursor.execute("SELECT value FROM trial_values WHERE trial_id = ?", (tid,))
row = cursor.fetchone()
obj_values.append(f"{row['value']:.4f}" if row else "N/A")
result.append("| **Objective** | " + " | ".join(obj_values) + " |")
# Parameter rows
for pname in param_names:
values = []
for tid in trial_ids:
cursor.execute("SELECT param_value FROM trial_params WHERE trial_id = ? AND param_name = ?", (tid, pname))
row = cursor.fetchone()
values.append(f"{row['param_value']:.4f}" if row else "N/A")
result.append(f"| {pname} | " + " | ".join(values) + " |")
conn.close()
return "\n".join(result)
    def _tool_get_summary(self, study_id: Optional[str]) -> str:
        """Build a Markdown summary: trial counts by state, best trial, and
        FEA-vs-surrogate source split.

        Args:
            study_id: Study to summarize; falls back to the current study.
        """
        db_path = self._get_db_path(study_id)
        conn = sqlite3.connect(str(db_path))
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        result = [f"# Optimization Summary\n"]
        # Trial counts by state
        cursor.execute("SELECT state, COUNT(*) as count FROM trials GROUP BY state")
        states = {row['state']: row['count'] for row in cursor.fetchall()}
        result.append("## Trial Counts")
        total = sum(states.values())
        result.append(f"- **Total**: {total}")
        for state, count in states.items():
            result.append(f"- {state}: {count}")
        # Best trial: lowest objective among completed trials
        # (minimization is assumed, consistent with value_asc elsewhere).
        cursor.execute("""
            SELECT t.trial_id, tv.value
            FROM trials t
            JOIN trial_values tv ON t.trial_id = tv.trial_id
            WHERE t.state = 'COMPLETE'
            ORDER BY tv.value ASC LIMIT 1
        """)
        best = cursor.fetchone()
        if best:
            result.append(f"\n## Best Trial")
            result.append(f"- **Trial ID**: {best['trial_id']}")
            result.append(f"- **Objective**: {best['value']:.6f}")
        # FEA vs NN counts, from the 'trial_source' user attribute
        # (stored as a JSON-encoded string by Optuna).
        cursor.execute("""
            SELECT value_json, COUNT(*) as count
            FROM trial_user_attributes
            WHERE key = 'trial_source'
            GROUP BY value_json
        """)
        sources = cursor.fetchall()
        if sources:
            result.append("\n## Trial Sources")
            for src in sources:
                source_name = json.loads(src['value_json']) if src['value_json'] else 'unknown'
                result.append(f"- **{source_name}**: {src['count']}")
        conn.close()
        return "\n".join(result)
def _tool_read_readme(self, study_id: Optional[str]) -> str:
"""Read study README"""
study_dir = self._get_study_dir(study_id)
readme_path = study_dir / "README.md"
if not readme_path.exists():
return "No README.md found for this study."
content = readme_path.read_text(encoding='utf-8')
# Truncate if too long
if len(content) > 8000:
content = content[:8000] + "\n\n... (truncated)"
return content
def _tool_list_studies(self) -> str:
"""List all studies"""
if not STUDIES_DIR.exists():
return "Studies directory not found."
result = ["# Available Studies\n"]
result.append("| Study | Status | Trials |")
result.append("|-------|--------|--------|")
for study_dir in sorted(STUDIES_DIR.iterdir()):
if not study_dir.is_dir():
continue
study_id = study_dir.name
# Check for database
trial_count = 0
for results_dir_name in ["2_results", "3_results"]:
db_path = study_dir / results_dir_name / "study.db"
if db_path.exists():
try:
conn = sqlite3.connect(str(db_path))
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM trials WHERE state='COMPLETE'")
trial_count = cursor.fetchone()[0]
conn.close()
except:
pass
break
# Determine status
status = "ready" if trial_count > 0 else "not_started"
result.append(f"| {study_id} | {status} | {trial_count} |")
return "\n".join(result)
    async def chat(self, message: str, conversation_history: Optional[List[Dict]] = None) -> Dict[str, Any]:
        """
        Process a chat message with tool use support.

        Runs an agentic loop: Claude may request tool calls, which are
        executed locally via _execute_tool and fed back as tool_result
        blocks until the model returns a final text reply.

        Args:
            message: User's message
            conversation_history: Previous messages for context

        Returns:
            Dict with keys 'response' (final text), 'tool_calls' (each call
            with its input and a truncated result preview) and 'conversation'
            (full message history including this exchange).
        """
        # Copy so the caller's history list is not mutated in place.
        messages = conversation_history.copy() if conversation_history else []
        messages.append({"role": "user", "content": message})
        tool_calls_made = []
        # Loop until the model stops asking for tools.
        while True:
            response = self.client.messages.create(
                model="claude-sonnet-4-20250514",
                max_tokens=4096,
                system=self.system_prompt,
                tools=self.tools,
                messages=messages
            )
            # Check if we need to handle tool use
            if response.stop_reason == "tool_use":
                # Process every tool_use block in the assistant turn
                assistant_content = response.content
                tool_results = []
                for block in assistant_content:
                    if block.type == "tool_use":
                        tool_name = block.name
                        tool_input = block.input
                        tool_id = block.id
                        # Execute the tool locally
                        result = self._execute_tool(tool_name, tool_input)
                        tool_calls_made.append({
                            "tool": tool_name,
                            "input": tool_input,
                            # Keep only a preview so the return payload stays small.
                            "result_preview": result[:200] + "..." if len(result) > 200 else result
                        })
                        tool_results.append({
                            "type": "tool_result",
                            "tool_use_id": tool_id,
                            "content": result
                        })
                # Per the Messages API, tool results go back as a user turn
                # following the assistant's tool_use turn.
                messages.append({"role": "assistant", "content": assistant_content})
                messages.append({"role": "user", "content": tool_results})
            else:
                # No more tool use: concatenate the text blocks for the reply.
                final_text = ""
                for block in response.content:
                    if hasattr(block, 'text'):
                        final_text += block.text
                return {
                    "response": final_text,
                    "tool_calls": tool_calls_made,
                    "conversation": messages + [{"role": "assistant", "content": response.content}]
                }
    async def chat_stream(self, message: str, conversation_history: Optional[List[Dict]] = None) -> AsyncGenerator[str, None]:
        """
        Stream a chat response token by token.

        Unlike chat(), this path is text-only: no tools are passed, so the
        model cannot request tool calls here.

        Args:
            message: User's message
            conversation_history: Previous messages

        Yields:
            Response tokens as they arrive
        """
        # Copy so the caller's history list is not mutated in place.
        messages = conversation_history.copy() if conversation_history else []
        messages.append({"role": "user", "content": message})
        # For streaming, we'll do a simpler approach without tool use for now
        # (tool use combined with streaming is more complex to orchestrate).
        with self.client.messages.stream(
            model="claude-sonnet-4-20250514",
            max_tokens=4096,
            system=self.system_prompt,
            messages=messages
        ) as stream:
            for text in stream.text_stream:
                yield text