# Atomizer/optimization_engine/devloop/planning.py
"""
Gemini Planner - Strategic planning and test design using Gemini Pro.
Handles:
- Implementation planning from objectives
- Test scenario generation
- Architecture decisions
- Risk assessment
"""
import asyncio
import json
import logging
import os
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
@dataclass
class PlanTask:
    """A single task in the implementation plan.

    Attributes:
        id: Unique task identifier (e.g. "task_001").
        file: Target file path the task touches, if any.
        code_hint: Pseudo-code or pattern suggestion for the implementer.
        priority: One of "high", "medium", "low".
        dependencies: IDs of tasks that must complete first.
    """
    id: str
    description: str
    file: Optional[str] = None
    code_hint: Optional[str] = None
    priority: str = "medium"
    # default_factory avoids the shared-mutable-default trap; the annotation
    # no longer lies about the field holding None.
    dependencies: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Backward compatibility: callers that explicitly pass
        # dependencies=None still get an empty list.
        if self.dependencies is None:
            self.dependencies = []
@dataclass
class TestScenario:
    """A test scenario for dashboard verification.

    Attributes:
        id: Unique scenario identifier (e.g. "test_001").
        name: Human-readable scenario name.
        type: One of "api", "browser", "cli", "filesystem".
        steps: Ordered action dicts executed by the verifier.
        expected_outcome: Expected result; defaults to {"status": "pass"}.
    """
    id: str
    name: str
    type: str  # "api", "browser", "cli", "filesystem"
    # default_factory avoids the shared-mutable-default trap.
    steps: List[Dict] = field(default_factory=list)
    expected_outcome: Optional[Dict] = None

    def __post_init__(self):
        # Backward compatibility: normalize an explicit steps=None to [].
        if self.steps is None:
            self.steps = []
        # Non-trivial default kept in __post_init__ so each instance gets
        # its own dict.
        if self.expected_outcome is None:
            self.expected_outcome = {"status": "pass"}
class GeminiPlanner:
    """
    Strategic planner using Gemini Pro.

    Generates:
    - Implementation tasks for Claude Code
    - Test scenarios for dashboard verification
    - Architecture decisions
    - Risk assessments
    """

    def __init__(self, config: Optional[Dict] = None):
        """
        Initialize the planner.

        Args:
            config: Configuration with API key and model settings.
        """
        self.config = config or {}
        self._client = None  # the genai module once loaded, or the string "mock"
        self._model = None  # genai.GenerativeModel; set together with _client

    @property
    def client(self):
        """Lazy-load the Gemini client.

        Returns the google.generativeai module on success, or the string
        "mock" when the SDK is not installed (mock planning mode).

        Raises:
            ValueError: if the SDK is installed but no API key is found in
                config["api_key"] or the GEMINI_API_KEY environment variable.
        """
        if self._client is None:
            try:
                import google.generativeai as genai

                api_key = self.config.get("api_key") or os.environ.get("GEMINI_API_KEY")
                if not api_key:
                    raise ValueError("GEMINI_API_KEY not set")
                genai.configure(api_key=api_key)
                self._client = genai
                model_name = self.config.get("model", "gemini-2.0-flash-thinking-exp-01-21")
                self._model = genai.GenerativeModel(model_name)
                logger.info(f"Gemini client initialized with model: {model_name}")
            except ImportError:
                logger.warning("google-generativeai not installed, using mock planner")
                self._client = "mock"
        return self._client

    async def create_plan(self, request: Dict) -> Dict:
        """
        Create an implementation plan from an objective.

        Args:
            request: Dict with:
                - objective: What to achieve
                - context: Additional context (study spec, etc.)
                - previous_results: Results from last iteration
                - historical_learnings: Relevant LAC insights

        Returns:
            Plan dict with tasks, test_scenarios, risks.
        """
        objective = request.get("objective", "")
        context = request.get("context", {})
        previous_results = request.get("previous_results")
        learnings = request.get("historical_learnings", [])

        prompt = self._build_planning_prompt(objective, context, previous_results, learnings)

        # Fall back to a canned plan when the real client is unavailable.
        if self.client == "mock":
            return self._mock_plan(objective, context)
        return await self._query_gemini(prompt)

    def _build_planning_prompt(
        self,
        objective: str,
        context: Dict,
        previous_results: Optional[Dict],
        learnings: List[Dict],
    ) -> str:
        """Build the planning prompt for Gemini."""
        prompt = f"""## Atomizer Development Planning Session
### Objective
{objective}
### Context
{json.dumps(context, indent=2) if context else "No additional context provided."}
### Previous Iteration Results
{json.dumps(previous_results, indent=2) if previous_results else "First iteration - no previous results."}
### Historical Learnings (from LAC)
{self._format_learnings(learnings)}
### Required Outputs
Generate a detailed implementation plan in JSON format with the following structure:
```json
{{
  "objective": "{objective}",
  "approach": "Brief description of the approach",
  "tasks": [
    {{
      "id": "task_001",
      "description": "What to do",
      "file": "path/to/file.py",
      "code_hint": "Pseudo-code or pattern to use",
      "priority": "high|medium|low",
      "dependencies": ["task_000"]
    }}
  ],
  "test_scenarios": [
    {{
      "id": "test_001",
      "name": "Test name",
      "type": "api|browser|cli|filesystem",
      "steps": [
        {{"action": "navigate", "target": "/canvas"}}
      ],
      "expected_outcome": {{"status": "pass", "assertions": []}}
    }}
  ],
  "risks": [
    {{
      "description": "What could go wrong",
      "mitigation": "How to handle it",
      "severity": "high|medium|low"
    }}
  ],
  "acceptance_criteria": [
    "Criteria 1",
    "Criteria 2"
  ]
}}
```
### Guidelines
1. **Tasks should be specific and actionable** - Each task should be completable by Claude Code
2. **Test scenarios must be verifiable** - Use dashboard endpoints and browser actions
3. **Consider Atomizer architecture** - Use existing extractors (SYS_12), follow AtomizerSpec v2.0
4. **Apply historical learnings** - Avoid known failure patterns
### Important Atomizer Patterns
- Studies use `atomizer_spec.json` (AtomizerSpec v2.0)
- Design variables have bounds: {{"min": X, "max": Y}}
- Objectives use extractors: E1 (displacement), E3 (stress), E4 (mass)
- Constraints define limits with operators: <, >, <=, >=
Output ONLY the JSON plan, no additional text.
"""
        return prompt

    def _format_learnings(self, learnings: List[Dict]) -> str:
        """Format LAC learnings as a bullet list for the prompt (top 5 only)."""
        if not learnings:
            return "No relevant historical learnings."
        formatted = []
        for learning in learnings[:5]:  # Limit to 5 most relevant
            formatted.append(
                f"- [{learning.get('category', 'insight')}] {learning.get('insight', '')}"
            )
        return "\n".join(formatted)

    @staticmethod
    def _extract_json_block(text: str) -> str:
        """Strip an optional Markdown code fence and return the JSON payload.

        Handles ```json fences, bare ``` fences, and raw (unfenced) JSON.
        Tolerates a missing closing fence instead of silently truncating the
        payload (text.find returning -1 used to drop the last character).
        """
        if "```json" in text:
            start = text.find("```json") + 7
        elif "```" in text:
            start = text.find("```") + 3
        else:
            return text.strip()
        end = text.find("```", start)
        if end == -1:
            end = len(text)
        return text[start:end].strip()

    async def _query_gemini(self, prompt: str) -> Dict:
        """Query Gemini and parse its reply into a plan dict.

        The blocking SDK call runs in the default executor so the event loop
        is not blocked. On parse or query failure, returns an error-shaped
        plan dict instead of raising.
        """
        try:
            loop = asyncio.get_running_loop()
            response = await loop.run_in_executor(
                None, lambda: self._model.generate_content(prompt)
            )
            text = response.text
            try:
                plan = json.loads(self._extract_json_block(text))
                logger.info(f"Gemini plan parsed: {len(plan.get('tasks', []))} tasks")
                return plan
            except json.JSONDecodeError as e:
                logger.error(f"Failed to parse Gemini response: {e}")
                return {
                    "objective": "Parse error",
                    "error": str(e),
                    "raw_response": text[:500],
                    "tasks": [],
                    "test_scenarios": [],
                }
        except Exception as e:
            logger.error(f"Gemini query failed: {e}")
            return {
                "objective": "Query error",
                "error": str(e),
                "tasks": [],
                "test_scenarios": [],
            }

    def _mock_plan(self, objective: str, context: Dict) -> Dict:
        """Generate a mock plan for testing without the Gemini API."""
        logger.info("Using mock planner (Gemini not available)")
        # Crude objective classification: keywords that suggest a new study.
        is_study_creation = any(
            kw in objective.lower() for kw in ["create", "study", "new", "setup"]
        )
        tasks = []
        test_scenarios = []
        if is_study_creation:
            study_name = context.get("study_name", "support_arm")
            tasks = [
                {
                    "id": "task_001",
                    "description": f"Create study directory structure for {study_name}",
                    "file": f"studies/_Other/{study_name}/",
                    "priority": "high",
                    "dependencies": [],
                },
                {
                    "id": "task_002",
                    "description": "Copy NX model files to study directory",
                    "file": f"studies/_Other/{study_name}/1_setup/model/",
                    "priority": "high",
                    "dependencies": ["task_001"],
                },
                {
                    "id": "task_003",
                    "description": "Create AtomizerSpec v2.0 configuration",
                    "file": f"studies/_Other/{study_name}/atomizer_spec.json",
                    "priority": "high",
                    "dependencies": ["task_002"],
                },
                {
                    "id": "task_004",
                    "description": "Create run_optimization.py script",
                    "file": f"studies/_Other/{study_name}/run_optimization.py",
                    "priority": "high",
                    "dependencies": ["task_003"],
                },
                {
                    "id": "task_005",
                    "description": "Create README.md documentation",
                    "file": f"studies/_Other/{study_name}/README.md",
                    "priority": "medium",
                    "dependencies": ["task_003"],
                },
            ]
            test_scenarios = [
                {
                    "id": "test_001",
                    "name": "Study directory exists",
                    "type": "filesystem",
                    "steps": [{"action": "check_exists", "path": f"studies/_Other/{study_name}"}],
                    "expected_outcome": {"exists": True},
                },
                {
                    "id": "test_002",
                    "name": "AtomizerSpec is valid",
                    "type": "api",
                    "steps": [
                        {"action": "get", "endpoint": f"/api/studies/{study_name}/spec/validate"}
                    ],
                    "expected_outcome": {"valid": True},
                },
                {
                    "id": "test_003",
                    "name": "Dashboard loads study",
                    "type": "browser",
                    "steps": [
                        {"action": "navigate", "url": f"/canvas/{study_name}"},
                        {"action": "wait_for", "selector": "[data-testid='canvas-container']"},
                    ],
                    "expected_outcome": {"loaded": True},
                },
            ]
        return {
            "objective": objective,
            "approach": "Mock plan for development testing",
            "tasks": tasks,
            "test_scenarios": test_scenarios,
            "risks": [
                {
                    "description": "NX model files may have dependencies",
                    "mitigation": "Copy all related files (_i.prt, .fem, .sim)",
                    "severity": "high",
                }
            ],
            "acceptance_criteria": [
                "Study directory structure created",
                "AtomizerSpec validates without errors",
                "Dashboard loads study canvas",
            ],
        }

    async def analyze_codebase(self, query: str) -> Dict:
        """
        Use Gemini to analyze codebase state.

        Args:
            query: What to analyze (e.g., "current dashboard components").

        Returns:
            Analysis results.
        """
        # This would integrate with codebase scanning.
        # For now, return a stub.
        return {
            "query": query,
            "analysis": "Codebase analysis not yet implemented",
            "recommendations": [],
        }

    async def generate_test_scenarios(
        self,
        feature: str,
        context: Optional[Dict] = None,
    ) -> List[Dict]:
        """
        Generate test scenarios for a specific feature.

        Args:
            feature: Feature to test (e.g., "study creation", "spec validation").
            context: Additional context.

        Returns:
            List of test scenarios. Always a list: previously this method
            implicitly returned None when the model reply had no ```json
            fence; it now parses any fence style and falls back to [].
        """
        prompt = f"""Generate test scenarios for the Atomizer feature: {feature}
Context: {json.dumps(context, indent=2) if context else "None"}
Output as JSON array of test scenarios:
```json
[
  {{
    "id": "test_001",
    "name": "Test name",
    "type": "api|browser|cli|filesystem",
    "steps": [...],
    "expected_outcome": {{...}}
  }}
]
```
"""
        if self.client == "mock":
            return self._mock_plan(feature, context or {}).get("test_scenarios", [])
        try:
            loop = asyncio.get_running_loop()
            response = await loop.run_in_executor(
                None, lambda: self._model.generate_content(prompt)
            )
            scenarios = json.loads(self._extract_json_block(response.text))
            # Guard against the model returning a JSON object instead of an array.
            return scenarios if isinstance(scenarios, list) else []
        except Exception as e:
            logger.error(f"Failed to generate test scenarios: {e}")
            return []