diff --git a/.claude/skills/01_CHEATSHEET.md b/.claude/skills/01_CHEATSHEET.md
index a34c6ac8..d4c600b3 100644
--- a/.claude/skills/01_CHEATSHEET.md
+++ b/.claude/skills/01_CHEATSHEET.md
@@ -1,11 +1,11 @@
 ---
 skill_id: SKILL_001
-version: 2.2
-last_updated: 2025-12-28
+version: 2.3
+last_updated: 2025-12-29
 type: reference
 code_dependencies:
   - optimization_engine/extractors/__init__.py
-  - optimization_engine/method_selector.py
+  - optimization_engine/core/method_selector.py
   - optimization_engine/utils/trial_manager.py
   - optimization_engine/utils/dashboard_db.py
 requires_skills:
@@ -14,8 +14,8 @@ requires_skills:
 
 # Atomizer Quick Reference Cheatsheet
 
-**Version**: 2.2
-**Updated**: 2025-12-28
+**Version**: 2.3
+**Updated**: 2025-12-29
 **Purpose**: Rapid lookup for common operations. "I want X → Use Y"
 
 ---
@@ -142,7 +142,7 @@ Question: Do you need >50 trials OR surrogate model?
 Exploits surrogate differentiability for **100-1000x faster** local refinement:
 
 ```python
-from optimization_engine.gradient_optimizer import GradientOptimizer, run_lbfgs_polish
+from optimization_engine.core.gradient_optimizer import GradientOptimizer, run_lbfgs_polish
 
 # Quick usage - polish from top FEA candidates
 results = run_lbfgs_polish(study_dir, n_starts=20, n_iterations=100)
@@ -154,7 +154,7 @@ result = optimizer.optimize(starting_points=top_candidates, method='lbfgs')
 
 **CLI usage**:
 ```bash
-python -m optimization_engine.gradient_optimizer studies/my_study --n-starts 20
+python -m optimization_engine.core.gradient_optimizer studies/my_study --n-starts 20
 
 # Or per-study script (if available)
 python run_lbfgs_polish.py --n-starts 20 --grid-then-grad
diff --git a/.claude/skills/modules/OPTIMIZATION_ENGINE_MIGRATION_PLAN.md b/.claude/skills/modules/OPTIMIZATION_ENGINE_MIGRATION_PLAN.md
index 22ee06bc..c7f47fb1 100644
--- a/.claude/skills/modules/OPTIMIZATION_ENGINE_MIGRATION_PLAN.md
+++ b/.claude/skills/modules/OPTIMIZATION_ENGINE_MIGRATION_PLAN.md
@@ -2,15 +2,42 @@
 
 ## Complete Guide for Safe Codebase Restructuring
 
-**Document Version**: 2.1 (EXHAUSTIVE + CONTEXT ENGINEERING ALIGNED)
+**Document Version**: 3.0 (COMPLETED)
 **Created**: 2025-12-23
-**Updated**: 2025-12-28
-**Status**: READY FOR EXECUTION
+**Updated**: 2025-12-29
+**Status**: ✅ COMPLETED - Merged to main
 **Risk Level**: HIGH - Affects 500+ files across entire codebase
 **Next Phase**: ATOMIZER_CONTEXT_ENGINEERING_PLAN.md (execute AFTER this migration)
 
 ---
 
+## ✅ Migration Completed: 2025-12-29
+
+### Final Statistics
+| Metric | Value |
+|--------|-------|
+| Files modified | 120 |
+| Import changes | ~200 |
+| Commits | 2 (pre-migration checkpoint + migration) |
+| Branch | `refactor/optimization-engine-v2` merged to `main` |
+| Version bump | `optimization_engine` v1.0 → v2.0.0 |
+
+### Key Changes Made
+1. Created new directory structure: `core/`, `processors/surrogates/`, `nx/`, `study/`, `reporting/`, `config/`
+2. Moved 60+ Python files to new locations
+3. Updated all imports across 125 files
+4. Added lazy-loading `__init__.py` files to avoid circular imports
+5. Implemented backwards compatibility layer with deprecation warnings
+6. Updated `feature_registry.json` paths
+
+### Post-Migration Notes
+- All existing `run_optimization.py` scripts continue to work
+- Old imports emit deprecation warnings but still function
+- Dashboard integration preserved
+- Study databases unchanged
+
+---
+
 ## Execution Order (IMPORTANT)
 
 This migration plan should be executed **BEFORE** the Context Engineering plan.
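The migration plan above records that lazy-loading `__init__.py` files were added to avoid circular imports (Key Changes, item 4), but the diff itself does not show one. A minimal sketch of that pattern, assuming a PEP 562 module-level `__getattr__`; the attribute-to-module map below is illustrative, not the shipped file:

```python
# optimization_engine/__init__.py -- illustrative sketch, not the actual file
import importlib

# Assumed mapping of public names to the submodules that now define them.
_LAZY_ATTRS = {
    "run_optimization": "optimization_engine.core.runner",
    "GradientOptimizer": "optimization_engine.core.gradient_optimizer",
    "setup_study": "optimization_engine.study.creator",
}

def __getattr__(name):
    # PEP 562: invoked only when normal attribute lookup fails, so each
    # submodule is imported on first use rather than at package import time,
    # which is what breaks circular-import chains between sibling modules.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```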
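Likewise, the "old imports emit deprecation warnings but still function" guarantee (Post-Migration Notes) is commonly implemented by leaving a stub module at each old path. A sketch under the assumption that `optimization_engine/runner.py` remains as a shim over `core/runner.py`:

```python
# optimization_engine/runner.py -- hypothetical compatibility shim at the old path
import warnings

warnings.warn(
    "optimization_engine.runner has moved to optimization_engine.core.runner; "
    "update your imports, this shim may be removed in a future release.",
    DeprecationWarning,
    stacklevel=2,  # point the warning at the caller's import, not this shim
)

# Re-export the relocated public API so old imports keep working.
from optimization_engine.core.runner import run_optimization  # noqa: E402,F401
```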
diff --git a/optimization_engine/feature_registry.json b/optimization_engine/feature_registry.json
index 7fc7eadf..2cc12f4f 100644
--- a/optimization_engine/feature_registry.json
+++ b/optimization_engine/feature_registry.json
@@ -1,7 +1,7 @@
 {
   "feature_registry": {
-    "version": "0.2.0",
-    "last_updated": "2025-01-16",
+    "version": "0.3.0",
+    "last_updated": "2025-12-29",
     "description": "Comprehensive catalog of Atomizer capabilities for LLM-driven optimization",
     "architecture_doc": "docs/FEATURE_REGISTRY_ARCHITECTURE.md",
     "categories": {
@@ -162,9 +162,9 @@
       "lifecycle_stage": "all",
       "abstraction_level": "workflow",
       "implementation": {
-        "file_path": "optimization_engine/runner.py",
+        "file_path": "optimization_engine/core/runner.py",
         "function_name": "run_optimization",
-        "entry_point": "from optimization_engine.runner import run_optimization"
+        "entry_point": "from optimization_engine.core.runner import run_optimization"
       },
       "interface": {
         "inputs": [
@@ -240,7 +240,7 @@
       "lifecycle_stage": "optimization",
       "abstraction_level": "primitive",
       "implementation": {
-        "file_path": "optimization_engine/runner.py",
+        "file_path": "optimization_engine/core/runner.py",
         "function_name": "optuna.samplers.TPESampler",
         "entry_point": "import optuna.samplers.TPESampler"
       },
@@ -295,9 +295,9 @@
       "lifecycle_stage": "solve",
       "abstraction_level": "primitive",
       "implementation": {
-        "file_path": "optimization_engine/nx_solver.py",
+        "file_path": "optimization_engine/nx/solver.py",
         "function_name": "run_nx_simulation",
-        "entry_point": "from optimization_engine.nx_solver import run_nx_simulation"
+        "entry_point": "from optimization_engine.nx.solver import run_nx_simulation"
       },
       "interface": {
         "inputs": [
@@ -370,9 +370,9 @@
       "lifecycle_stage": "pre_solve",
       "abstraction_level": "primitive",
       "implementation": {
-        "file_path": "optimization_engine/nx_updater.py",
+        "file_path": "optimization_engine/nx/updater.py",
         "function_name": "update_nx_expressions",
-        "entry_point": "from optimization_engine.nx_updater import update_nx_expressions"
+        "entry_point": "from optimization_engine.nx.updater import update_nx_expressions"
       },
       "interface": {
         "inputs": [
@@ -558,9 +558,9 @@
       "lifecycle_stage": "pre_optimization",
       "abstraction_level": "composite",
       "implementation": {
-        "file_path": "optimization_engine/runner.py",
+        "file_path": "optimization_engine/study/creator.py",
         "function_name": "setup_study",
-        "entry_point": "from optimization_engine.runner import setup_study"
+        "entry_point": "from optimization_engine.study.creator import setup_study"
       },
       "interface": {
         "inputs": [
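Since `feature_registry.json` now embeds the migrated paths, a small consistency check can confirm that every `file_path` exists and every `from ... import ...` style `entry_point` resolves after the move. A sketch, assuming entries follow the `implementation` shape shown in the hunks above; the helper script and its recursive walk are hypothetical, not part of this diff:

```python
# check_registry_paths.py -- hypothetical post-migration helper
import importlib
import json
from pathlib import Path

def iter_implementations(node):
    """Yield every "implementation" dict found anywhere in the registry."""
    if isinstance(node, dict):
        if "implementation" in node:
            yield node["implementation"]
        for value in node.values():
            yield from iter_implementations(value)
    elif isinstance(node, list):
        for item in node:
            yield from iter_implementations(item)

registry = json.loads(Path("optimization_engine/feature_registry.json").read_text())
for impl in iter_implementations(registry):
    path = impl.get("file_path", "")
    if path and not Path(path).exists():
        print(f"MISSING FILE: {path}")
    entry = impl.get("entry_point", "")
    if entry.startswith("from "):
        # "from pkg.mod import name" -> importable module plus attribute
        module_name, attr = entry.removeprefix("from ").split(" import ")
        try:
            getattr(importlib.import_module(module_name), attr.strip())
        except Exception as exc:
            print(f"BROKEN ENTRY POINT: {entry} ({exc})")
```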