refactor: Archive experimental LLM features for MVP stability (Phase 1.1)

Moved experimental LLM integration code to optimization_engine/future/:
- llm_optimization_runner.py - Runtime LLM API runner
- llm_workflow_analyzer.py - Workflow analysis
- inline_code_generator.py - Auto-generate calculations
- hook_generator.py - Auto-generate hooks
- report_generator.py - LLM report generation
- extractor_orchestrator.py - Extractor orchestration

Added comprehensive optimization_engine/future/README.md explaining:
- MVP LLM strategy (Claude Code skills, not runtime LLM)
- Why files were archived
- When to revisit post-MVP
- Production architecture reference

Production runner confirmed: optimization_engine/runner.py is the sole active runner.

This establishes clear separation between:
- Production code (stable, no runtime LLM dependencies)
- Experimental code (archived for post-MVP exploration)

Part of Phase 1: Core Stabilization & Organization for MVP

Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-11-24 09:12:36 -05:00
parent 46515475cb
commit d228ccec66
377 changed files with 1195 additions and 16789 deletions

View File

@@ -98,13 +98,25 @@ class RealtimeTrackingCallback:
def _write_optimizer_state(self, study: optuna.Study, trial: optuna.trial.FrozenTrial):
"""Write current optimizer state."""
# [Protocol 11] For multi-objective, strategy is always NSGA-II
is_multi_objective = len(study.directions) > 1
if is_multi_objective:
# Multi-objective studies use NSGA-II, skip adaptive characterization
current_strategy = "NSGA-II"
current_phase = "multi_objective_optimization"
else:
# Single-objective uses intelligent strategy selection
current_strategy = getattr(self.optimizer, 'current_strategy', 'unknown')
current_phase = getattr(self.optimizer, 'current_phase', 'unknown')
state = {
"timestamp": datetime.now().isoformat(),
"trial_number": trial.number,
"total_trials": len(study.trials),
"current_phase": getattr(self.optimizer, 'current_phase', 'unknown'),
"current_strategy": getattr(self.optimizer, 'current_strategy', 'unknown'),
"is_multi_objective": len(study.directions) > 1,
"current_phase": current_phase,
"current_strategy": current_strategy,
"is_multi_objective": is_multi_objective,
"study_directions": [str(d) for d in study.directions],
}
@@ -132,18 +144,27 @@ class RealtimeTrackingCallback:
# NOTE(review): diff-view fragment — this span begins mid-method ('else:' with
# no visible 'if') and indentation was lost in extraction, so the code below is
# kept byte-identical rather than restructured.
else:
log = []
# [Protocol 11] Handle both single and multi-objective
is_multi_objective = len(study.directions) > 1
# Append new trial
trial_entry = {
"trial_number": trial.number,
"timestamp": datetime.now().isoformat(),
"state": str(trial.state),
"params": trial.params,
# NOTE(review): the 'value'/'values' entries below are overwritten by the
# Protocol 11 branch further down — they look like pre-refactor lines the
# diff merge retained; confirm against the actual file.
"value": trial.value if trial.value is not None else None,
"values": trial.values if hasattr(trial, 'values') and trial.values is not None else None,
"duration_seconds": (trial.datetime_complete - trial.datetime_start).total_seconds() if trial.datetime_complete else None,
"user_attrs": dict(trial.user_attrs) if trial.user_attrs else {}
}
# Add objectives (Protocol 11 compliant)
if is_multi_objective:
# Multi-objective: record the per-objective values list; scalar 'value'
# has no meaning for multi-objective trials.
trial_entry["values"] = trial.values if trial.values is not None else None
trial_entry["value"] = None # Not available
else:
# Single-objective: record the scalar value; 'values' is nulled out.
trial_entry["value"] = trial.value if trial.value is not None else None
trial_entry["values"] = None
log.append(trial_entry)
self._atomic_write(trial_log_file, log)