Compare commits
11 Commits
backup/pre
...
32caa5d05c
| Author | SHA1 | Date | |
|---|---|---|---|
| 32caa5d05c | |||
| b1ffc64407 | |||
| 8c7a589547 | |||
| 5e64cfb211 | |||
| f0e594570a | |||
| e78b10929c | |||
| 773f8ff8af | |||
| 0110d80401 | |||
| 820c34c39a | |||
| eabcc4c3ca | |||
| 82f36689b7 |
@@ -49,7 +49,7 @@ Use keyword matching to load appropriate context:
|
|||||||
| Run optimization | "run", "start", "execute", "trials" | OP_02 + SYS_15 | Execute optimization |
|
| Run optimization | "run", "start", "execute", "trials" | OP_02 + SYS_15 | Execute optimization |
|
||||||
| Check progress | "status", "progress", "how many" | OP_03 | Query study.db |
|
| Check progress | "status", "progress", "how many" | OP_03 | Query study.db |
|
||||||
| Analyze results | "results", "best", "Pareto", "analyze" | OP_04 | Generate analysis |
|
| Analyze results | "results", "best", "Pareto", "analyze" | OP_04 | Generate analysis |
|
||||||
| Neural acceleration | "neural", "surrogate", "turbo", "NN" | SYS_14 + SYS_15 | Method selection |
|
| Neural acceleration | "neural", "surrogate", "turbo", "NN", "SAT" | SYS_14 + SYS_16 | Method selection |
|
||||||
| NX/CAD help | "NX", "model", "mesh", "expression" | MCP + nx-docs | Use Siemens MCP |
|
| NX/CAD help | "NX", "model", "mesh", "expression" | MCP + nx-docs | Use Siemens MCP |
|
||||||
| Physics insights | "zernike", "stress view", "insight" | SYS_16 | Generate insights |
|
| Physics insights | "zernike", "stress view", "insight" | SYS_16 | Generate insights |
|
||||||
| Troubleshoot | "error", "failed", "fix", "debug" | OP_06 | Diagnose issues |
|
| Troubleshoot | "error", "failed", "fix", "debug" | OP_06 | Diagnose issues |
|
||||||
@@ -172,7 +172,8 @@ studies/{geometry_type}/{study_name}/
|
|||||||
│ SYS_10: IMSO (single-obj) SYS_11: Multi-objective │
|
│ SYS_10: IMSO (single-obj) SYS_11: Multi-objective │
|
||||||
│ SYS_12: Extractors SYS_13: Dashboard │
|
│ SYS_12: Extractors SYS_13: Dashboard │
|
||||||
│ SYS_14: Neural Accel SYS_15: Method Selector │
|
│ SYS_14: Neural Accel SYS_15: Method Selector │
|
||||||
│ SYS_16: Study Insights │
|
│ SYS_16: SAT (Self-Aware Turbo) - VALIDATED v3, WS=205.58 │
|
||||||
|
│ SYS_17: Context Engineering │
|
||||||
└─────────────────────────────────────────────────────────────────┘
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
▼
|
▼
|
||||||
┌─────────────────────────────────────────────────────────────────┐
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
|||||||
@@ -2,110 +2,42 @@
|
|||||||
"permissions": {
|
"permissions": {
|
||||||
"allow": [
|
"allow": [
|
||||||
"Bash(dir:*)",
|
"Bash(dir:*)",
|
||||||
"Bash(sqlite3:*)",
|
"Bash(powershell -Command:*)",
|
||||||
"Bash(timeout /t 30 /nobreak)",
|
|
||||||
"Bash(npm install:*)",
|
|
||||||
"Bash(git add:*)",
|
|
||||||
"Bash(git commit:*)",
|
|
||||||
"Bash(git push:*)",
|
|
||||||
"Bash(python:*)",
|
"Bash(python:*)",
|
||||||
"Bash(conda activate:*)",
|
"Bash(git:*)",
|
||||||
"Bash(C:/Users/Antoine/miniconda3/envs/atomizer/python.exe:*)",
|
"Bash(npm:*)",
|
||||||
"Bash(cat:*)",
|
"Bash(conda:*)",
|
||||||
"Bash(C:UsersAntoineminiconda3envsatomizerpython.exe run_adaptive_mirror_optimization.py --fea-budget 100 --batch-size 5 --strategy hybrid)",
|
"Bash(pip:*)",
|
||||||
"Bash(/c/Users/Antoine/miniconda3/envs/atomizer/python.exe:*)",
|
"Bash(cmd /c:*)",
|
||||||
"Bash(npm run build:*)",
|
"Bash(tasklist:*)",
|
||||||
"Bash(npm uninstall:*)",
|
"Bash(taskkill:*)",
|
||||||
|
"Bash(robocopy:*)",
|
||||||
|
"Bash(xcopy:*)",
|
||||||
|
"Bash(del:*)",
|
||||||
|
"Bash(type:*)",
|
||||||
|
"Bash(where:*)",
|
||||||
"Bash(netstat:*)",
|
"Bash(netstat:*)",
|
||||||
"Bash(findstr:*)",
|
"Bash(findstr:*)",
|
||||||
"Bash(curl:*)",
|
"Bash(curl:*)",
|
||||||
"Bash(npx tsc:*)",
|
"Read",
|
||||||
"Bash(atomizer-dashboard/README.md )",
|
"Skill(dashboard:*)",
|
||||||
"Bash(atomizer-dashboard/backend/api/main.py )",
|
"Bash(C:Usersantoianaconda3envsatomizerpython.exe:*)",
|
||||||
"Bash(atomizer-dashboard/backend/api/routes/optimization.py )",
|
"Bash(del \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_flat_back_V5\\\\3_results\\\\study.db\")",
|
||||||
"Bash(atomizer-dashboard/backend/api/routes/claude.py )",
|
"Bash(C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe -c:*)",
|
||||||
"Bash(atomizer-dashboard/backend/api/routes/terminal.py )",
|
"Bash(C:Usersantoianaconda3envsatomizerpython.exe run_optimization.py --trials 1)",
|
||||||
"Bash(atomizer-dashboard/backend/api/services/ )",
|
"Bash(C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe -m py_compile:*)",
|
||||||
"Bash(atomizer-dashboard/backend/requirements.txt )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver analyze \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\")",
|
||||||
"Bash(atomizer-dashboard/frontend/package.json )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_V12\")",
|
||||||
"Bash(atomizer-dashboard/frontend/package-lock.json )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_V2\")",
|
||||||
"Bash(atomizer-dashboard/frontend/src/components/ClaudeChat.tsx )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_V11\")",
|
||||||
"Bash(atomizer-dashboard/frontend/src/components/ClaudeTerminal.tsx )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_V11\" --execute)",
|
||||||
"Bash(atomizer-dashboard/frontend/src/components/dashboard/ControlPanel.tsx )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_flat_back_V3\")",
|
||||||
"Bash(atomizer-dashboard/frontend/src/pages/Dashboard.tsx )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_flat_back_V3\" --execute)",
|
||||||
"Bash(atomizer-dashboard/frontend/src/context/ )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_flat_back_V6\" --execute)",
|
||||||
"Bash(atomizer-dashboard/frontend/src/pages/Home.tsx )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_flat_back_V1\" --execute)",
|
||||||
"Bash(atomizer-dashboard/frontend/src/App.tsx )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_flat_back_V5\" --execute)",
|
||||||
"Bash(atomizer-dashboard/frontend/src/api/client.ts )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction_V12\" --execute)",
|
||||||
"Bash(atomizer-dashboard/frontend/src/components/layout/Sidebar.tsx )",
|
"Bash(\"C:\\\\Users\\\\antoi\\\\anaconda3\\\\envs\\\\atomizer\\\\python.exe\" -m optimization_engine.utils.study_archiver cleanup \"C:\\\\Users\\\\antoi\\\\Atomizer\\\\studies\\\\M1_Mirror\\\\m1_mirror_cost_reduction\" --execute)"
|
||||||
"Bash(atomizer-dashboard/frontend/src/index.css )",
|
|
||||||
"Bash(atomizer-dashboard/frontend/src/pages/Results.tsx )",
|
|
||||||
"Bash(atomizer-dashboard/frontend/tailwind.config.js )",
|
|
||||||
"Bash(docs/07_DEVELOPMENT/DASHBOARD_IMPROVEMENT_PLAN.md)",
|
|
||||||
"Bash(taskkill:*)",
|
|
||||||
"Bash(xargs:*)",
|
|
||||||
"Bash(cmd.exe /c:*)",
|
|
||||||
"Bash(powershell.exe -Command:*)",
|
|
||||||
"Bash(where:*)",
|
|
||||||
"Bash(type %USERPROFILE%.claude*)",
|
|
||||||
"Bash(conda create:*)",
|
|
||||||
"Bash(cmd /c \"conda create -n atomizer python=3.10 -y\")",
|
|
||||||
"Bash(cmd /c \"where conda\")",
|
|
||||||
"Bash(cmd /c \"dir /b C:\\Users\\antoi\\anaconda3\\Scripts\\conda.exe 2>nul || dir /b C:\\Users\\antoi\\miniconda3\\Scripts\\conda.exe 2>nul || dir /b C:\\ProgramData\\anaconda3\\Scripts\\conda.exe 2>nul || dir /b C:\\ProgramData\\miniconda3\\Scripts\\conda.exe 2>nul || echo NOT_FOUND\")",
|
|
||||||
"Bash(cmd /c \"if exist C:\\Users\\antoi\\anaconda3\\Scripts\\conda.exe (echo FOUND: anaconda3) else if exist C:\\Users\\antoi\\miniconda3\\Scripts\\conda.exe (echo FOUND: miniconda3) else if exist C:\\ProgramData\\anaconda3\\Scripts\\conda.exe (echo FOUND: ProgramData\\anaconda3) else (echo NOT_FOUND)\")",
|
|
||||||
"Bash(powershell:*)",
|
|
||||||
"Bash(C:Usersantoianaconda3Scriptsconda.exe create -n atomizer python=3.10 -y)",
|
|
||||||
"Bash(cmd /c \"C:\\Users\\antoi\\anaconda3\\Scripts\\conda.exe create -n atomizer python=3.10 -y\")",
|
|
||||||
"Bash(cmd /c \"set SPLM_LICENSE_SERVER=28000@dalidou;28000@100.80.199.40 && \"\"C:\\Program Files\\Siemens\\DesigncenterNX2512\\NXBIN\\run_journal.exe\"\" \"\"C:\\Users\\antoi\\Atomizer\\optimization_engine\\solve_simulation.py\"\" -args \"\"C:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_adaptive_V15\\2_iterations\\iter2\\ASSY_M1_assyfem1_sim1.sim\"\" \"\"Solution 1\"\" 2>&1\")",
|
|
||||||
"Bash(cmd /c \"set SPLM_LICENSE_SERVER=28000@dalidou;28000@100.80.199.40 && \"C:Program FilesSiemensDesigncenterNX2512NXBINrun_journal.exe\" \"C:UsersantoiAtomizernx_journalsextract_part_mass_material.py\" -args \"C:UsersantoiAtomizerstudiesm1_mirror_cost_reduction1_setupmodelM1_Blank.prt\" \"C:UsersantoiAtomizerstudiesm1_mirror_cost_reduction1_setupmodel\" 2>&1\")",
|
|
||||||
"Bash(npm run dev:*)",
|
|
||||||
"Bash(cmd /c \"cd /d C:\\Users\\antoi\\Atomizer\\atomizer-dashboard\\frontend && npm run dev\")",
|
|
||||||
"Bash(cmd /c \"cd /d C:\\Users\\antoi\\Atomizer\\atomizer-dashboard\\frontend && dir package.json && npm --version\")",
|
|
||||||
"Bash(cmd /c \"set SPLM_LICENSE_SERVER=28000@dalidou;28000@100.80.199.40 && \"\"C:\\Program Files\\Siemens\\DesigncenterNX2512\\NXBIN\\run_journal.exe\"\" \"\"C:\\Users\\antoi\\Atomizer\\nx_journals\\extract_part_mass_material.py\"\" -args \"\"C:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_cost_reduction\\1_setup\\model\\M1_Blank.prt\"\" \"\"C:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_cost_reduction\\1_setup\\model\"\" 2>&1\")",
|
|
||||||
"Bash(cmd /c \"set SPLM_LICENSE_SERVER=28000@dalidou;28000@100.80.199.40 && \"\"C:\\Program Files\\Siemens\\DesigncenterNX2512\\NXBIN\\run_journal.exe\"\" \"\"C:\\Users\\antoi\\Atomizer\\nx_journals\\extract_expressions.py\"\" -args \"\"C:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_cost_reduction\\1_setup\\model\\M1_Blank.prt\"\" \"\"C:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_cost_reduction\\1_setup\\model\"\" 2>&1\")",
|
|
||||||
"Bash(cmd /c \"set SPLM_LICENSE_SERVER=28000@dalidou;28000@100.80.199.40 && \"\"C:\\Program Files\\Siemens\\DesigncenterNX2512\\NXBIN\\run_journal.exe\"\" \"\"C:\\Users\\antoi\\Atomizer\\nx_journals\\extract_expressions.py\"\" -args \"\"C:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_cost_reduction\\1_setup\\model\\M1_Blank.prt\"\" \"\"C:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_cost_reduction\\1_setup\\model\"\"\")",
|
|
||||||
"Bash(cmd /c:*)",
|
|
||||||
"Bash(taskkill /F /FI \"WINDOWTITLE eq *uvicorn*\")",
|
|
||||||
"Bash(python -m uvicorn:*)",
|
|
||||||
"Bash(conda run:*)",
|
|
||||||
"Bash(/c/Users/antoi/miniconda3/envs/atomizer/python.exe -m uvicorn:*)",
|
|
||||||
"Bash(/c/Users/antoi/anaconda3/envs/atomizer/python.exe -m uvicorn:*)",
|
|
||||||
"Bash(/c/Users/antoi/anaconda3/envs/atomizer/python.exe:*)",
|
|
||||||
"Bash(tasklist:*)",
|
|
||||||
"Bash(wmic process where \"ProcessId=147068\" delete)",
|
|
||||||
"Bash(cmd.exe //c \"taskkill /F /PID 147068\")",
|
|
||||||
"Bash(pip show:*)",
|
|
||||||
"Bash(python3:*)",
|
|
||||||
"Bash(python extract_all_mirror_data.py:*)",
|
|
||||||
"Bash(C:Usersantoiminiconda3envsatomizerpython.exe extract_all_mirror_data.py)",
|
|
||||||
"Bash(/c/Users/antoi/miniconda3/envs/atomizer/python.exe:*)",
|
|
||||||
"Bash(grep:*)",
|
|
||||||
"Bash(python -c:*)",
|
|
||||||
"Bash(C:Usersantoianaconda3envsatomizerpython.exe -c \"\nimport pandas as pd\ndf = pd.read_csv(r''c:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_all_trials_export.csv'')\n\n# Check which columns have data\nprint(''=== Column data availability ==='')\nfor col in df.columns:\n non_null = df[col].notna().sum()\n print(f''{col}: {non_null}/{len(df)} ({100*non_null/len(df):.1f}%)'')\n\nprint(''\\n=== Studies in dataset ==='')\nprint(df[''study''].value_counts())\n\")",
|
|
||||||
"Bash(cmd /c \"C:\\Users\\antoi\\anaconda3\\envs\\atomizer\\python.exe -c \"\"import pandas as pd; df = pd.read_csv(r''c:\\Users\\antoi\\Atomizer\\studies\\m1_mirror_all_trials_export.csv''); print(''Rows:'', len(df)); print(df.columns.tolist())\"\"\")",
|
|
||||||
"Bash(robocopy:*)",
|
|
||||||
"Bash(xcopy:*)",
|
|
||||||
"Bash(ls:*)",
|
|
||||||
"Bash(dir \"c:\\Users\\antoi\\Atomizer\\studies\\*.png\")",
|
|
||||||
"Bash(powershell -Command \"Get-Process | Where-Object { $_Modules.FileName -like ''*study.db*'' } | Select-Object Id, ProcessName\")",
|
|
||||||
"Bash(powershell -Command:*)",
|
|
||||||
"Bash(C:/Users/antoi/miniconda3/envs/atomizer/python.exe -m uvicorn:*)",
|
|
||||||
"Bash(dir /s /b \"C:\\Users\\antoi\\*conda*\")",
|
|
||||||
"Bash(conda run -n atomizer python:*)",
|
|
||||||
"Bash(C:/ProgramData/anaconda3/condabin/conda.bat run -n atomizer python -c \"\nimport sqlite3\n\ndb_path = ''studies/M1_Mirror/m1_mirror_cost_reduction_V6/3_results/study.db''\nconn = sqlite3.connect(db_path)\ncursor = conn.cursor()\n\n# Get counts\ncursor.execute(''SELECT COUNT(*) FROM trials'')\ntotal = cursor.fetchone()[0]\n\ncursor.execute(\"\"SELECT COUNT(*) FROM trials WHERE state = ''COMPLETE''\"\")\ncomplete = cursor.fetchone()[0]\n\nprint(f''=== V6 Study Status ==='')\nprint(f''Total trials: {total}'')\nprint(f''Completed: {complete}'')\nprint(f''Failed/Pruned: {total - complete}'')\nprint(f''Progress: {complete}/200 ({100*complete/200:.1f}%)'')\n\n# Get objectives stats\nobjs = [''rel_filtered_rms_40_vs_20'', ''rel_filtered_rms_60_vs_20'', ''mfg_90_optician_workload'', ''mass_kg'']\nprint(f''\\n=== Objectives Stats ==='')\nfor obj in objs:\n cursor.execute(f\"\"SELECT MIN({obj}), MAX({obj}), AVG({obj}) FROM trials WHERE state = ''COMPLETE'' AND {obj} IS NOT NULL\"\")\n result = cursor.fetchone()\n if result and result[0] is not None:\n print(f''{obj}: min={result[0]:.4f}, max={result[1]:.4f}, mean={result[2]:.4f}'')\n\n# Design variables stats \ndvs = [''whiffle_min'', ''whiffle_outer_to_vertical'', ''whiffle_triangle_closeness'', ''blank_backface_angle'', ''Pocket_Radius'']\nprint(f''\\n=== Design Variables Explored ==='')\nfor dv in dvs:\n try:\n cursor.execute(f\"\"SELECT MIN({dv}), MAX({dv}), AVG({dv}) FROM trials WHERE state = ''COMPLETE''\"\")\n result = cursor.fetchone()\n if result and result[0] is not None:\n print(f''{dv}: min={result[0]:.3f}, max={result[1]:.3f}, mean={result[2]:.3f}'')\n except Exception as e:\n print(f''{dv}: error - {e}'')\n\nconn.close()\n\")",
|
|
||||||
"Bash(/c/Users/antoi/anaconda3/python.exe:*)",
|
|
||||||
"Bash(C:UsersantoiAtomizertemp_extract.bat)",
|
|
||||||
"Bash(dir /b \"C:\\Users\\antoi\\Atomizer\\knowledge_base\\lac\")",
|
|
||||||
"Bash(pip install:*)",
|
|
||||||
"Bash(dir \"C:\\Users\\antoi\\Atomizer\\studies\\M1_Mirror\\m1_mirror_cost_reduction_V7\\3_results\")",
|
|
||||||
"Bash(call \"%USERPROFILE%\\anaconda3\\Scripts\\activate.bat\" atomizer)",
|
|
||||||
"Bash(cmd /c \"cd /d c:\\Users\\antoi\\Atomizer && call %USERPROFILE%\\anaconda3\\Scripts\\activate.bat atomizer && python -c \"\"import sys; sys.path.insert(0, ''.''); from optimization_engine.extractors import ZernikeExtractor; print(''OK''); import inspect; print(inspect.signature(ZernikeExtractor.extract_relative))\"\"\")",
|
|
||||||
"Bash(cmd /c \"cd /d c:\\Users\\antoi\\Atomizer && c:\\Users\\antoi\\anaconda3\\envs\\atomizer\\python.exe -c \"\"import sys; sys.path.insert(0, ''.''); from optimization_engine.extractors import ZernikeExtractor; print(''Import OK''); import inspect; sig = inspect.signature(ZernikeExtractor.extract_relative); print(''Signature:'', sig)\"\"\")",
|
|
||||||
"Bash(c:Usersantoianaconda3envsatomizerpython.exe c:UsersantoiAtomizertoolstest_zernike_import.py)",
|
|
||||||
"Bash(dir \"C:\\Users\\antoi\\Atomizer\\studies\\M1_Mirror\\m1_mirror_cost_reduction_V7\\3_results\\best_design_archive\")",
|
|
||||||
"Bash(dir \"C:\\Users\\antoi\\Atomizer\\studies\\M1_Mirror\\m1_mirror_cost_reduction_V7\\3_results\\best_design_archive\\20251220_010128\")",
|
|
||||||
"Bash(dir /s /b \"C:\\Users\\antoi\\Atomizer\\studies\\M1_Mirror\\m1_mirror_cost_reduction_V8\")",
|
|
||||||
"Bash(c:/Users/antoi/anaconda3/envs/atomizer/python.exe:*)"
|
|
||||||
],
|
],
|
||||||
"deny": [],
|
"deny": [],
|
||||||
"ask": []
|
"ask": []
|
||||||
|
|||||||
@@ -65,7 +65,12 @@ When a user request arrives, classify it:
|
|||||||
User Request
|
User Request
|
||||||
│
|
│
|
||||||
├─► CREATE something?
|
├─► CREATE something?
|
||||||
│ ├─ "new study", "set up", "create", "optimize this"
|
│ ├─ "new study", "set up", "create", "optimize this", "create a study"
|
||||||
|
│ ├─► DEFAULT: Interview Mode (guided Q&A with validation)
|
||||||
|
│ │ └─► Load: modules/study-interview-mode.md + OP_01
|
||||||
|
│ │
|
||||||
|
│ └─► MANUAL mode? (power users, explicit request)
|
||||||
|
│ ├─ "quick setup", "skip interview", "manual config"
|
||||||
│ └─► Load: OP_01_CREATE_STUDY.md + core/study-creation-core.md
|
│ └─► Load: OP_01_CREATE_STUDY.md + core/study-creation-core.md
|
||||||
│
|
│
|
||||||
├─► RUN something?
|
├─► RUN something?
|
||||||
@@ -84,6 +89,10 @@ User Request
|
|||||||
│ ├─ "error", "failed", "not working", "crashed"
|
│ ├─ "error", "failed", "not working", "crashed"
|
||||||
│ └─► Load: OP_06_TROUBLESHOOT.md
|
│ └─► Load: OP_06_TROUBLESHOOT.md
|
||||||
│
|
│
|
||||||
|
├─► MANAGE disk space?
|
||||||
|
│ ├─ "disk", "space", "cleanup", "archive", "storage"
|
||||||
|
│ └─► Load: OP_07_DISK_OPTIMIZATION.md
|
||||||
|
│
|
||||||
├─► CONFIGURE settings?
|
├─► CONFIGURE settings?
|
||||||
│ ├─ "change", "modify", "settings", "parameters"
|
│ ├─ "change", "modify", "settings", "parameters"
|
||||||
│ └─► Load relevant SYS_* protocol
|
│ └─► Load relevant SYS_* protocol
|
||||||
@@ -103,12 +112,14 @@ User Request
|
|||||||
|
|
||||||
| User Intent | Keywords | Protocol | Skill to Load | Privilege |
|
| User Intent | Keywords | Protocol | Skill to Load | Privilege |
|
||||||
|-------------|----------|----------|---------------|-----------|
|
|-------------|----------|----------|---------------|-----------|
|
||||||
| Create study | "new", "set up", "create", "optimize" | OP_01 | **core/study-creation-core.md** | user |
|
| **Create study (DEFAULT)** | "new", "set up", "create", "optimize", "create a study" | OP_01 | **modules/study-interview-mode.md** | user |
|
||||||
|
| Create study (manual) | "quick setup", "skip interview", "manual config" | OP_01 | core/study-creation-core.md | power_user |
|
||||||
| Run optimization | "start", "run", "execute", "begin" | OP_02 | - | user |
|
| Run optimization | "start", "run", "execute", "begin" | OP_02 | - | user |
|
||||||
| Monitor progress | "status", "progress", "trials", "check" | OP_03 | - | user |
|
| Monitor progress | "status", "progress", "trials", "check" | OP_03 | - | user |
|
||||||
| Analyze results | "results", "best", "compare", "pareto" | OP_04 | - | user |
|
| Analyze results | "results", "best", "compare", "pareto" | OP_04 | - | user |
|
||||||
| Export training data | "export", "training data", "neural" | OP_05 | modules/neural-acceleration.md | user |
|
| Export training data | "export", "training data", "neural" | OP_05 | modules/neural-acceleration.md | user |
|
||||||
| Debug issues | "error", "failed", "not working", "help" | OP_06 | - | user |
|
| Debug issues | "error", "failed", "not working", "help" | OP_06 | - | user |
|
||||||
|
| **Disk management** | "disk", "space", "cleanup", "archive" | **OP_07** | modules/study-disk-optimization.md | user |
|
||||||
| Understand IMSO | "protocol 10", "IMSO", "adaptive" | SYS_10 | - | user |
|
| Understand IMSO | "protocol 10", "IMSO", "adaptive" | SYS_10 | - | user |
|
||||||
| Multi-objective | "pareto", "NSGA", "multi-objective" | SYS_11 | - | user |
|
| Multi-objective | "pareto", "NSGA", "multi-objective" | SYS_11 | - | user |
|
||||||
| Extractors | "extractor", "displacement", "stress" | SYS_12 | modules/extractors-catalog.md | user |
|
| Extractors | "extractor", "displacement", "stress" | SYS_12 | modules/extractors-catalog.md | user |
|
||||||
|
|||||||
425
.claude/skills/00_BOOTSTRAP_V2.md
Normal file
425
.claude/skills/00_BOOTSTRAP_V2.md
Normal file
@@ -0,0 +1,425 @@
|
|||||||
|
---
|
||||||
|
skill_id: SKILL_000
|
||||||
|
version: 3.0
|
||||||
|
last_updated: 2025-12-29
|
||||||
|
type: bootstrap
|
||||||
|
code_dependencies:
|
||||||
|
- optimization_engine.context.playbook
|
||||||
|
- optimization_engine.context.session_state
|
||||||
|
- optimization_engine.context.feedback_loop
|
||||||
|
requires_skills: []
|
||||||
|
---
|
||||||
|
|
||||||
|
# Atomizer LLM Bootstrap v3.0 - Context-Aware Sessions
|
||||||
|
|
||||||
|
**Version**: 3.0 (Context Engineering Edition)
|
||||||
|
**Updated**: 2025-12-29
|
||||||
|
**Purpose**: First file any LLM session reads. Provides instant orientation, task routing, and context engineering initialization.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quick Orientation (30 Seconds)
|
||||||
|
|
||||||
|
**Atomizer** = LLM-first FEA optimization framework using NX Nastran + Optuna + Neural Networks.
|
||||||
|
|
||||||
|
**Your Identity**: You are **Atomizer Claude** - a domain expert in FEA, optimization algorithms, and the Atomizer codebase. Not a generic assistant.
|
||||||
|
|
||||||
|
**Core Philosophy**: "Talk, don't click." Users describe what they want; you configure and execute.
|
||||||
|
|
||||||
|
**NEW in v3.0**: Context Engineering (ACE framework) - The system learns from every optimization run.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Session Startup Checklist
|
||||||
|
|
||||||
|
On **every new session**, complete these steps:
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ SESSION STARTUP (v3.0) │
|
||||||
|
├─────────────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ STEP 1: Initialize Context Engineering │
|
||||||
|
│ □ Load playbook from knowledge_base/playbook.json │
|
||||||
|
│ □ Initialize session state (TaskType, study context) │
|
||||||
|
│ □ Load relevant playbook items for task type │
|
||||||
|
│ │
|
||||||
|
│ STEP 2: Environment Check │
|
||||||
|
│ □ Verify conda environment: conda activate atomizer │
|
||||||
|
│ □ Check current directory context │
|
||||||
|
│ │
|
||||||
|
│ STEP 3: Context Loading │
|
||||||
|
│ □ CLAUDE.md loaded (system instructions) │
|
||||||
|
│ □ This file (00_BOOTSTRAP_V2.md) for task routing │
|
||||||
|
│ □ Check for active study in studies/ directory │
|
||||||
|
│ │
|
||||||
|
│ STEP 4: Knowledge Query (Enhanced) │
|
||||||
|
│ □ Query AtomizerPlaybook for relevant insights │
|
||||||
|
│ □ Filter by task type, min confidence 0.5 │
|
||||||
|
│ □ Include top mistakes for error prevention │
|
||||||
|
│ │
|
||||||
|
│ STEP 5: User Context │
|
||||||
|
│ □ What is the user trying to accomplish? │
|
||||||
|
│ □ Is there an active study context? │
|
||||||
|
│ □ What privilege level? (default: user) │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Context Engineering Initialization
|
||||||
|
|
||||||
|
```python
|
||||||
|
# On session start, initialize context engineering
|
||||||
|
from optimization_engine.context import (
|
||||||
|
AtomizerPlaybook,
|
||||||
|
AtomizerSessionState,
|
||||||
|
TaskType,
|
||||||
|
get_session
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load playbook
|
||||||
|
playbook = AtomizerPlaybook.load(Path("knowledge_base/playbook.json"))
|
||||||
|
|
||||||
|
# Initialize session
|
||||||
|
session = get_session()
|
||||||
|
session.exposed.task_type = TaskType.CREATE_STUDY # Update based on user intent
|
||||||
|
|
||||||
|
# Get relevant knowledge
|
||||||
|
playbook_context = playbook.get_context_for_task(
|
||||||
|
task_type="optimization",
|
||||||
|
max_items=15,
|
||||||
|
min_confidence=0.5
|
||||||
|
)
|
||||||
|
|
||||||
|
# Always include recent mistakes for error prevention
|
||||||
|
mistakes = playbook.get_by_category(InsightCategory.MISTAKE, min_score=-2)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Task Classification Tree
|
||||||
|
|
||||||
|
When a user request arrives, classify it and update session state:
|
||||||
|
|
||||||
|
```
|
||||||
|
User Request
|
||||||
|
│
|
||||||
|
├─► CREATE something?
|
||||||
|
│ ├─ "new study", "set up", "create", "optimize this"
|
||||||
|
│ ├─ session.exposed.task_type = TaskType.CREATE_STUDY
|
||||||
|
│ └─► Load: OP_01_CREATE_STUDY.md + core/study-creation-core.md
|
||||||
|
│
|
||||||
|
├─► RUN something?
|
||||||
|
│ ├─ "start", "run", "execute", "begin optimization"
|
||||||
|
│ ├─ session.exposed.task_type = TaskType.RUN_OPTIMIZATION
|
||||||
|
│ └─► Load: OP_02_RUN_OPTIMIZATION.md
|
||||||
|
│
|
||||||
|
├─► CHECK status?
|
||||||
|
│ ├─ "status", "progress", "how many trials", "what's happening"
|
||||||
|
│ ├─ session.exposed.task_type = TaskType.MONITOR_PROGRESS
|
||||||
|
│ └─► Load: OP_03_MONITOR_PROGRESS.md
|
||||||
|
│
|
||||||
|
├─► ANALYZE results?
|
||||||
|
│ ├─ "results", "best design", "compare", "pareto"
|
||||||
|
│ ├─ session.exposed.task_type = TaskType.ANALYZE_RESULTS
|
||||||
|
│ └─► Load: OP_04_ANALYZE_RESULTS.md
|
||||||
|
│
|
||||||
|
├─► DEBUG/FIX error?
|
||||||
|
│ ├─ "error", "failed", "not working", "crashed"
|
||||||
|
│ ├─ session.exposed.task_type = TaskType.DEBUG_ERROR
|
||||||
|
│ └─► Load: OP_06_TROUBLESHOOT.md + playbook[MISTAKE]
|
||||||
|
│
|
||||||
|
├─► MANAGE disk space?
|
||||||
|
│ ├─ "disk", "space", "cleanup", "archive", "storage"
|
||||||
|
│ └─► Load: OP_07_DISK_OPTIMIZATION.md
|
||||||
|
│
|
||||||
|
├─► CONFIGURE settings?
|
||||||
|
│ ├─ "change", "modify", "settings", "parameters"
|
||||||
|
│ ├─ session.exposed.task_type = TaskType.CONFIGURE_SETTINGS
|
||||||
|
│ └─► Load relevant SYS_* protocol
|
||||||
|
│
|
||||||
|
├─► NEURAL acceleration?
|
||||||
|
│ ├─ "neural", "surrogate", "turbo", "GNN"
|
||||||
|
│ ├─ session.exposed.task_type = TaskType.NEURAL_ACCELERATION
|
||||||
|
│ └─► Load: SYS_14_NEURAL_ACCELERATION.md
|
||||||
|
│
|
||||||
|
└─► EXTEND functionality?
|
||||||
|
├─ "add extractor", "new hook", "create protocol"
|
||||||
|
└─► Check privilege, then load EXT_* protocol
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Protocol Routing Table (With Context Loading)
|
||||||
|
|
||||||
|
| User Intent | Keywords | Protocol | Skill to Load | Playbook Filter |
|
||||||
|
|-------------|----------|----------|---------------|-----------------|
|
||||||
|
| Create study | "new", "set up", "create" | OP_01 | study-creation-core.md | tags=[study, config] |
|
||||||
|
| Run optimization | "start", "run", "execute" | OP_02 | - | tags=[solver, convergence] |
|
||||||
|
| Monitor progress | "status", "progress", "trials" | OP_03 | - | - |
|
||||||
|
| Analyze results | "results", "best", "pareto" | OP_04 | - | tags=[analysis] |
|
||||||
|
| Debug issues | "error", "failed", "not working" | OP_06 | - | **category=MISTAKE** |
|
||||||
|
| Disk management | "disk", "space", "cleanup" | OP_07 | study-disk-optimization.md | - |
|
||||||
|
| Neural surrogates | "neural", "surrogate", "turbo" | SYS_14 | neural-acceleration.md | tags=[neural, surrogate] |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Playbook Integration Pattern

### Loading Playbook Context

```python
def load_context_for_task(task_type: TaskType, session: AtomizerSessionState):
    """Load full context including playbook for LLM consumption."""
    context_parts = []

    # 1. Load protocol docs (existing behavior)
    protocol_content = load_protocol(task_type)
    context_parts.append(protocol_content)

    # 2. Load session state (exposed only)
    context_parts.append(session.get_llm_context())

    # 3. Load relevant playbook items
    playbook = AtomizerPlaybook.load(PLAYBOOK_PATH)
    playbook_context = playbook.get_context_for_task(
        task_type=task_type.value,
        max_items=15,
        min_confidence=0.6
    )
    context_parts.append(playbook_context)

    # 4. Add error-specific items if debugging
    if task_type == TaskType.DEBUG_ERROR:
        mistakes = playbook.get_by_category(InsightCategory.MISTAKE)
        for item in mistakes[:5]:
            context_parts.append(item.to_context_string())

    return "\n\n---\n\n".join(context_parts)
```
|
||||||
|
|
||||||
|
### Real-Time Recording

**CRITICAL**: Record insights IMMEDIATELY when they occur. Do not wait until session end.

```python
# On discovering a workaround
playbook.add_insight(
    category=InsightCategory.WORKFLOW,
    content="For mesh update issues, load _i.prt file before UpdateFemodel()",
    tags=["mesh", "nx", "update"]
)
playbook.save(PLAYBOOK_PATH)

# On trial failure
playbook.add_insight(
    category=InsightCategory.MISTAKE,
    content="Convergence failure with tolerance < 1e-8 on large meshes",
    source_trial=trial_number,
    tags=["convergence", "solver"]
)
playbook.save(PLAYBOOK_PATH)
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Handling Protocol (Enhanced)

When ANY error occurs:

1. **Preserve the error** - Add to session state
2. **Check playbook** - Look for matching mistake patterns
3. **Learn from it** - If novel error, add to playbook
4. **Show to user** - Include error context in response

```python
# On error
session.add_error(f"{error_type}: {error_message}", error_type=error_type)

# Check playbook for similar errors
similar = playbook.search_by_content(error_message, category=InsightCategory.MISTAKE)
if similar:
    print(f"Known issue: {similar[0].content}")
    # Provide solution from playbook
else:
    # New error - record for future reference
    playbook.add_insight(
        category=InsightCategory.MISTAKE,
        content=f"{error_type}: {error_message[:200]}",
        tags=["error", error_type]
    )
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Context Budget Management

Total context budget: ~100K tokens

Allocation:
- **Stable prefix**: 5K tokens (cached across requests)
- **Protocols**: 10K tokens
- **Playbook items**: 5K tokens
- **Session state**: 2K tokens
- **Conversation history**: 30K tokens
- **Working space**: 48K tokens

If approaching limit:
1. Trigger compaction of old events
2. Reduce playbook items to top 5
3. Summarize conversation history
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Framework (AVERVS)

For ANY task, follow this pattern:

```
1. ANNOUNCE → State what you're about to do
2. VALIDATE → Check prerequisites are met
3. EXECUTE  → Perform the action
4. RECORD   → Record outcome to playbook (NEW!)
5. VERIFY   → Confirm success
6. REPORT   → Summarize what was done
7. SUGGEST  → Offer logical next steps
```
|
||||||
|
|
||||||
|
### Recording After Execution

```python
# After successful execution
playbook.add_insight(
    category=InsightCategory.STRATEGY,
    content=f"Approach worked: {brief_description}",
    tags=relevant_tags
)

# After failure
playbook.add_insight(
    category=InsightCategory.MISTAKE,
    content=f"Failed approach: {brief_description}. Reason: {reason}",
    tags=relevant_tags
)

# Always save after recording
playbook.save(PLAYBOOK_PATH)
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Session Closing Checklist (Enhanced)
|
||||||
|
|
||||||
|
Before ending a session, complete:
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ SESSION CLOSING (v3.0) │
|
||||||
|
├─────────────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ 1. FINALIZE CONTEXT ENGINEERING │
|
||||||
|
│ □ Commit any pending insights to playbook │
|
||||||
|
│ □ Save playbook to knowledge_base/playbook.json │
|
||||||
|
│ □ Export learning report if optimization completed │
|
||||||
|
│ │
|
||||||
|
│ 2. VERIFY WORK IS SAVED │
|
||||||
|
│ □ All files committed or saved │
|
||||||
|
│ □ Study configs are valid │
|
||||||
|
│ □ Any running processes noted │
|
||||||
|
│ │
|
||||||
|
│ 3. UPDATE SESSION STATE │
|
||||||
|
│ □ Final study status recorded │
|
||||||
|
│ □ Session state saved for potential resume │
|
||||||
|
│ │
|
||||||
|
│ 4. SUMMARIZE FOR USER │
|
||||||
|
│ □ What was accomplished │
|
||||||
|
│ □ What the system learned (new playbook items) │
|
||||||
|
│ □ Current state of any studies │
|
||||||
|
│ □ Recommended next steps │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Finalization Code
|
||||||
|
|
||||||
|
```python
|
||||||
|
# At session end
|
||||||
|
from optimization_engine.context import FeedbackLoop, save_playbook
|
||||||
|
|
||||||
|
# If optimization was run, finalize learning
|
||||||
|
if optimization_completed:
|
||||||
|
feedback = FeedbackLoop(playbook_path)
|
||||||
|
result = feedback.finalize_study({
|
||||||
|
"name": study_name,
|
||||||
|
"total_trials": n_trials,
|
||||||
|
"best_value": best_value,
|
||||||
|
"convergence_rate": success_rate
|
||||||
|
})
|
||||||
|
print(f"Learning finalized: {result['insights_added']} insights added")
|
||||||
|
|
||||||
|
# Always save playbook
|
||||||
|
save_playbook()
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Context Engineering Components Reference
|
||||||
|
|
||||||
|
| Component | Purpose | Location |
|
||||||
|
|-----------|---------|----------|
|
||||||
|
| **AtomizerPlaybook** | Knowledge store with helpful/harmful tracking | `optimization_engine/context/playbook.py` |
|
||||||
|
| **AtomizerReflector** | Analyzes outcomes, extracts insights | `optimization_engine/context/reflector.py` |
|
||||||
|
| **AtomizerSessionState** | Context isolation (exposed/isolated) | `optimization_engine/context/session_state.py` |
|
||||||
|
| **FeedbackLoop** | Connects outcomes to playbook updates | `optimization_engine/context/feedback_loop.py` |
|
||||||
|
| **CompactionManager** | Handles long sessions | `optimization_engine/context/compaction.py` |
|
||||||
|
| **ContextCacheOptimizer** | KV-cache optimization | `optimization_engine/context/cache_monitor.py` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quick Paths
|
||||||
|
|
||||||
|
### "I just want to run an optimization"
|
||||||
|
1. Initialize session state as RUN_OPTIMIZATION
|
||||||
|
2. Load playbook items for [solver, convergence]
|
||||||
|
3. Load OP_02_RUN_OPTIMIZATION.md
|
||||||
|
4. After run, finalize feedback loop
|
||||||
|
|
||||||
|
### "Something broke"
|
||||||
|
1. Initialize session state as DEBUG_ERROR
|
||||||
|
2. Load ALL mistake items from playbook
|
||||||
|
3. Load OP_06_TROUBLESHOOT.md
|
||||||
|
4. Record any new errors discovered
|
||||||
|
|
||||||
|
### "What did my optimization find?"
|
||||||
|
1. Initialize session state as ANALYZE_RESULTS
|
||||||
|
2. Load OP_04_ANALYZE_RESULTS.md
|
||||||
|
3. Query the study database
|
||||||
|
4. Generate report
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Key Constraints (Always Apply)
|
||||||
|
|
||||||
|
1. **Python Environment**: Always use `conda activate atomizer`
|
||||||
|
2. **Never modify master files**: Copy NX files to study working directory first
|
||||||
|
3. **Code reuse**: Check `optimization_engine/extractors/` before writing new extraction code
|
||||||
|
4. **Validation**: Always validate config before running optimization
|
||||||
|
5. **Record immediately**: Don't wait until session end to record insights
|
||||||
|
6. **Save playbook**: After every insight, save the playbook
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Migration from v2.0
|
||||||
|
|
||||||
|
If upgrading from BOOTSTRAP v2.0:
|
||||||
|
|
||||||
|
1. The LAC system is now superseded by AtomizerPlaybook
|
||||||
|
2. Session insights are now structured PlaybookItems
|
||||||
|
3. Helpful/harmful tracking replaces simple confidence scores
|
||||||
|
4. Context is now explicitly exposed vs isolated
|
||||||
|
|
||||||
|
The old LAC files in `knowledge_base/lac/` are still readable but new insights should use the playbook system.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Atomizer v3.0: Where engineers talk, AI optimizes, and the system learns.*
|
||||||
@@ -1,11 +1,11 @@
|
|||||||
---
|
---
|
||||||
skill_id: SKILL_001
|
skill_id: SKILL_001
|
||||||
version: 2.2
|
version: 2.4
|
||||||
last_updated: 2025-12-28
|
last_updated: 2025-12-31
|
||||||
type: reference
|
type: reference
|
||||||
code_dependencies:
|
code_dependencies:
|
||||||
- optimization_engine/extractors/__init__.py
|
- optimization_engine/extractors/__init__.py
|
||||||
- optimization_engine/method_selector.py
|
- optimization_engine/core/method_selector.py
|
||||||
- optimization_engine/utils/trial_manager.py
|
- optimization_engine/utils/trial_manager.py
|
||||||
- optimization_engine/utils/dashboard_db.py
|
- optimization_engine/utils/dashboard_db.py
|
||||||
requires_skills:
|
requires_skills:
|
||||||
@@ -14,8 +14,8 @@ requires_skills:
|
|||||||
|
|
||||||
# Atomizer Quick Reference Cheatsheet
|
# Atomizer Quick Reference Cheatsheet
|
||||||
|
|
||||||
**Version**: 2.2
|
**Version**: 2.4
|
||||||
**Updated**: 2025-12-28
|
**Updated**: 2025-12-31
|
||||||
**Purpose**: Rapid lookup for common operations. "I want X → Use Y"
|
**Purpose**: Rapid lookup for common operations. "I want X → Use Y"
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -30,9 +30,11 @@ requires_skills:
|
|||||||
| See best results | OP_04 | `optuna-dashboard sqlite:///study.db` or dashboard |
|
| See best results | OP_04 | `optuna-dashboard sqlite:///study.db` or dashboard |
|
||||||
| Export neural training data | OP_05 | `python run_optimization.py --export-training` |
|
| Export neural training data | OP_05 | `python run_optimization.py --export-training` |
|
||||||
| Fix an error | OP_06 | Read error log → follow diagnostic tree |
|
| Fix an error | OP_06 | Read error log → follow diagnostic tree |
|
||||||
|
| **Free disk space** | **OP_07** | `archive_study.bat cleanup <study> --execute` |
|
||||||
| Add custom physics extractor | EXT_01 | Create in `optimization_engine/extractors/` |
|
| Add custom physics extractor | EXT_01 | Create in `optimization_engine/extractors/` |
|
||||||
| Add lifecycle hook | EXT_02 | Create in `optimization_engine/plugins/` |
|
| Add lifecycle hook | EXT_02 | Create in `optimization_engine/plugins/` |
|
||||||
| Generate physics insight | SYS_16 | `python -m optimization_engine.insights generate <study>` |
|
| Generate physics insight | SYS_16 | `python -m optimization_engine.insights generate <study>` |
|
||||||
|
| **Manage knowledge/playbook** | **SYS_17** | `from optimization_engine.context import AtomizerPlaybook` |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -89,13 +91,31 @@ Question: Do you have 2-3 competing goals?
|
|||||||
### Neural Network Acceleration
|
### Neural Network Acceleration
|
||||||
```
|
```
|
||||||
Question: Do you need >50 trials OR surrogate model?
|
Question: Do you need >50 trials OR surrogate model?
|
||||||
├─ Yes
|
├─ Yes, have 500+ historical samples
|
||||||
│ └─► Protocol 14 (configure surrogate_settings in config)
|
│ └─► SYS_16 SAT v3 (Self-Aware Turbo) - BEST RESULTS
|
||||||
|
│
|
||||||
|
├─ Yes, have 50-500 samples
|
||||||
|
│ └─► Protocol 14 with ensemble surrogate
|
||||||
│
|
│
|
||||||
└─ Training data export needed?
|
└─ Training data export needed?
|
||||||
└─► OP_05_EXPORT_TRAINING_DATA.md
|
└─► OP_05_EXPORT_TRAINING_DATA.md
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### SAT v3 (Self-Aware Turbo) - NEW BEST METHOD
|
||||||
|
```
|
||||||
|
When: Have 500+ historical FEA samples from prior studies
|
||||||
|
Result: V9 achieved WS=205.58 (12% better than TPE)
|
||||||
|
|
||||||
|
Key settings:
|
||||||
|
├─ n_ensemble_models: 5
|
||||||
|
├─ adaptive exploration: 15% → 8% → 3%
|
||||||
|
├─ mass_soft_threshold: 118.0 kg
|
||||||
|
├─ exploit_near_best_ratio: 0.7
|
||||||
|
└─ lbfgs_polish_trials: 10
|
||||||
|
|
||||||
|
Reference: SYS_16_SELF_AWARE_TURBO.md
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Configuration Quick Reference
|
## Configuration Quick Reference
|
||||||
@@ -141,7 +161,7 @@ Question: Do you need >50 trials OR surrogate model?
|
|||||||
Exploits surrogate differentiability for **100-1000x faster** local refinement:
|
Exploits surrogate differentiability for **100-1000x faster** local refinement:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from optimization_engine.gradient_optimizer import GradientOptimizer, run_lbfgs_polish
|
from optimization_engine.core.gradient_optimizer import GradientOptimizer, run_lbfgs_polish
|
||||||
|
|
||||||
# Quick usage - polish from top FEA candidates
|
# Quick usage - polish from top FEA candidates
|
||||||
results = run_lbfgs_polish(study_dir, n_starts=20, n_iterations=100)
|
results = run_lbfgs_polish(study_dir, n_starts=20, n_iterations=100)
|
||||||
@@ -153,7 +173,7 @@ result = optimizer.optimize(starting_points=top_candidates, method='lbfgs')
|
|||||||
|
|
||||||
**CLI usage**:
|
**CLI usage**:
|
||||||
```bash
|
```bash
|
||||||
python -m optimization_engine.gradient_optimizer studies/my_study --n-starts 20
|
python -m optimization_engine.core.gradient_optimizer studies/my_study --n-starts 20
|
||||||
|
|
||||||
# Or per-study script (if available)
|
# Or per-study script (if available)
|
||||||
python run_lbfgs_polish.py --n-starts 20 --grid-then-grad
|
python run_lbfgs_polish.py --n-starts 20 --grid-then-grad
|
||||||
@@ -219,6 +239,48 @@ python -c "import optuna; s=optuna.load_study('my_study', 'sqlite:///3_results/s
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Disk Space Management (OP_07)
|
||||||
|
|
||||||
|
FEA studies consume massive disk space. After completion, clean up regenerable files:
|
||||||
|
|
||||||
|
### Quick Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Analyze disk usage
|
||||||
|
archive_study.bat analyze studies\M1_Mirror
|
||||||
|
|
||||||
|
# Cleanup completed study (dry run first!)
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
|
||||||
|
# Archive to dalidou server
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
|
||||||
|
# List remote archives
|
||||||
|
archive_study.bat list
|
||||||
|
```
|
||||||
|
|
||||||
|
### What Gets Deleted vs Kept
|
||||||
|
|
||||||
|
| KEEP | DELETE |
|
||||||
|
|------|--------|
|
||||||
|
| `.op2` (Nastran results) | `.prt, .fem, .sim` (copies of master) |
|
||||||
|
| `.json` (params/metadata) | `.dat` (solver input) |
|
||||||
|
| `1_setup/` (master files) | `.f04, .f06, .log` (solver logs) |
|
||||||
|
| `3_results/` (database) | `.afm, .diag, .bak` (temp files) |
|
||||||
|
|
||||||
|
### Typical Savings
|
||||||
|
|
||||||
|
| Stage | M1_Mirror Example |
|
||||||
|
|-------|-------------------|
|
||||||
|
| Full | 194 GB |
|
||||||
|
| After cleanup | 114 GB (41% saved) |
|
||||||
|
| Archived to server | 5 GB local (97% saved) |
|
||||||
|
|
||||||
|
**Full details**: `docs/protocols/operations/OP_07_DISK_OPTIMIZATION.md`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## LAC (Learning Atomizer Core) Commands
|
## LAC (Learning Atomizer Core) Commands
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -323,6 +385,7 @@ Without it, `UpdateFemodel()` runs but the mesh doesn't change!
|
|||||||
| 14 | Neural | Surrogate model acceleration |
|
| 14 | Neural | Surrogate model acceleration |
|
||||||
| 15 | Method Selector | Recommends optimization strategy |
|
| 15 | Method Selector | Recommends optimization strategy |
|
||||||
| 16 | Study Insights | Physics visualizations (Zernike, stress, modal) |
|
| 16 | Study Insights | Physics visualizations (Zernike, stress, modal) |
|
||||||
|
| 17 | Context Engineering | ACE framework - self-improving knowledge system |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -506,3 +569,106 @@ convert_custom_to_optuna(db_path, study_name)
|
|||||||
- Trial numbers **NEVER reset** across study lifetime
|
- Trial numbers **NEVER reset** across study lifetime
|
||||||
- Surrogate predictions (5K per batch) are NOT logged as trials
|
- Surrogate predictions (5K per batch) are NOT logged as trials
|
||||||
- Only FEA-validated results become trials
|
- Only FEA-validated results become trials
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Context Engineering Quick Reference (SYS_17)
|
||||||
|
|
||||||
|
The ACE (Agentic Context Engineering) framework enables self-improving optimization through structured knowledge capture.
|
||||||
|
|
||||||
|
### Core Components
|
||||||
|
|
||||||
|
| Component | Purpose | Key Function |
|
||||||
|
|-----------|---------|--------------|
|
||||||
|
| **AtomizerPlaybook** | Structured knowledge store | `playbook.add_insight()`, `playbook.get_context_for_task()` |
|
||||||
|
| **AtomizerReflector** | Extracts insights from outcomes | `reflector.analyze_outcome()` |
|
||||||
|
| **AtomizerSessionState** | Context isolation (exposed/isolated) | `session.get_llm_context()` |
|
||||||
|
| **FeedbackLoop** | Automated learning | `feedback.process_trial_result()` |
|
||||||
|
| **CompactionManager** | Long-session handling | `compactor.maybe_compact()` |
|
||||||
|
| **CacheMonitor** | KV-cache optimization | `optimizer.track_completion()` |
|
||||||
|
|
||||||
|
### Python API Quick Reference
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import (
|
||||||
|
AtomizerPlaybook, AtomizerReflector, get_session,
|
||||||
|
InsightCategory, TaskType, FeedbackLoop
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load playbook
|
||||||
|
playbook = AtomizerPlaybook.load(Path("knowledge_base/playbook.json"))
|
||||||
|
|
||||||
|
# Add an insight
|
||||||
|
playbook.add_insight(
|
||||||
|
category=InsightCategory.STRATEGY, # str, mis, tool, cal, dom, wf
|
||||||
|
content="CMA-ES converges faster on smooth mirror surfaces",
|
||||||
|
tags=["mirror", "sampler", "convergence"]
|
||||||
|
)
|
||||||
|
playbook.save(Path("knowledge_base/playbook.json"))
|
||||||
|
|
||||||
|
# Get context for LLM
|
||||||
|
context = playbook.get_context_for_task(
|
||||||
|
task_type="optimization",
|
||||||
|
max_items=15,
|
||||||
|
min_confidence=0.5
|
||||||
|
)
|
||||||
|
|
||||||
|
# Record feedback
|
||||||
|
playbook.record_outcome(item_id="str_001", helpful=True)
|
||||||
|
|
||||||
|
# Session state
|
||||||
|
session = get_session()
|
||||||
|
session.exposed.task_type = TaskType.RUN_OPTIMIZATION
|
||||||
|
session.add_action("Started optimization run")
|
||||||
|
llm_context = session.get_llm_context()
|
||||||
|
|
||||||
|
# Feedback loop (automated learning)
|
||||||
|
feedback = FeedbackLoop(playbook_path)
|
||||||
|
feedback.process_trial_result(
|
||||||
|
trial_number=42,
|
||||||
|
params={'thickness': 10.5},
|
||||||
|
objectives={'mass': 5.2},
|
||||||
|
is_feasible=True
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Insight Categories
|
||||||
|
|
||||||
|
| Category | Code | Use For |
|
||||||
|
|----------|------|---------|
|
||||||
|
| Strategy | `str` | Optimization approaches that work |
|
||||||
|
| Mistake | `mis` | Common errors to avoid |
|
||||||
|
| Tool | `tool` | Tool usage patterns |
|
||||||
|
| Calculation | `cal` | Formulas and calculations |
|
||||||
|
| Domain | `dom` | FEA/NX domain knowledge |
|
||||||
|
| Workflow | `wf` | Process patterns |
|
||||||
|
|
||||||
|
### Playbook Item Format
|
||||||
|
|
||||||
|
```
|
||||||
|
[str_001] helpful=5 harmful=0 :: CMA-ES converges faster on smooth surfaces
|
||||||
|
```
|
||||||
|
|
||||||
|
- `net_score = helpful - harmful`
|
||||||
|
- `confidence = helpful / (helpful + harmful)`
|
||||||
|
- Items with `net_score < -3` are pruned
|
||||||
|
|
||||||
|
### REST API Endpoints
|
||||||
|
|
||||||
|
| Endpoint | Method | Purpose |
|
||||||
|
|----------|--------|---------|
|
||||||
|
| `/api/context/playbook` | GET | Playbook summary stats |
|
||||||
|
| `/api/context/playbook/items` | GET | List items with filters |
|
||||||
|
| `/api/context/playbook/feedback` | POST | Record helpful/harmful |
|
||||||
|
| `/api/context/playbook/insights` | POST | Add new insight |
|
||||||
|
| `/api/context/playbook/prune` | POST | Remove harmful items |
|
||||||
|
| `/api/context/session` | GET | Current session state |
|
||||||
|
| `/api/context/learning/report` | GET | Comprehensive learning report |
|
||||||
|
|
||||||
|
### Dashboard URL
|
||||||
|
|
||||||
|
| Service | URL | Purpose |
|
||||||
|
|---------|-----|---------|
|
||||||
|
| Context API | `http://localhost:5000/api/context` | Playbook management |
|
||||||
|
|
||||||
|
**Full documentation**: `docs/protocols/system/SYS_17_CONTEXT_ENGINEERING.md`
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
464
.claude/skills/modules/study-disk-optimization.md
Normal file
464
.claude/skills/modules/study-disk-optimization.md
Normal file
@@ -0,0 +1,464 @@
|
|||||||
|
# Study Disk Optimization Module
|
||||||
|
|
||||||
|
## Atomizer Disk Space Management System
|
||||||
|
|
||||||
|
**Version:** 1.0
|
||||||
|
**Created:** 2025-12-29
|
||||||
|
**Status:** PRODUCTION READY
|
||||||
|
**Impact:** Reduced M1_Mirror from 194 GB → 114 GB (80 GB freed, 41% reduction)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Executive Summary
|
||||||
|
|
||||||
|
FEA optimization studies consume massive disk space due to per-trial file copying. This module provides:
|
||||||
|
|
||||||
|
1. **Local Cleanup** - Remove regenerable files from completed studies (50%+ savings)
|
||||||
|
2. **Remote Archival** - Archive to dalidou server (14TB available)
|
||||||
|
3. **On-Demand Restore** - Pull archived studies when needed
|
||||||
|
|
||||||
|
### Key Insight
|
||||||
|
|
||||||
|
Each trial folder contains ~150 MB, but only **~70 MB is essential** (OP2 results + metadata). The rest are copies of master files that can be regenerated.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 1: File Classification
|
||||||
|
|
||||||
|
### Essential Files (KEEP)
|
||||||
|
|
||||||
|
| Extension | Purpose | Typical Size |
|
||||||
|
|-----------|---------|--------------|
|
||||||
|
| `.op2` | Nastran binary results | 68 MB |
|
||||||
|
| `.json` | Parameters, results, metadata | <1 MB |
|
||||||
|
| `.npz` | Pre-computed Zernike coefficients | <1 MB |
|
||||||
|
| `.html` | Generated reports | <1 MB |
|
||||||
|
| `.png` | Visualization images | <1 MB |
|
||||||
|
| `.csv` | Exported data tables | <1 MB |
|
||||||
|
|
||||||
|
### Deletable Files (REGENERABLE)
|
||||||
|
|
||||||
|
| Extension | Purpose | Why Deletable |
|
||||||
|
|-----------|---------|---------------|
|
||||||
|
| `.prt` | NX part files | Copy of master in `1_setup/` |
|
||||||
|
| `.fem` | FEM mesh files | Copy of master |
|
||||||
|
| `.sim` | Simulation files | Copy of master |
|
||||||
|
| `.afm` | Assembly FEM | Regenerable |
|
||||||
|
| `.dat` | Solver input deck | Regenerable from params |
|
||||||
|
| `.f04` | Nastran output log | Diagnostic only |
|
||||||
|
| `.f06` | Nastran printed output | Diagnostic only |
|
||||||
|
| `.log` | Generic logs | Diagnostic only |
|
||||||
|
| `.diag` | Diagnostic files | Diagnostic only |
|
||||||
|
| `.txt` | Temp text files | Intermediate data |
|
||||||
|
| `.exp` | Expression files | Regenerable |
|
||||||
|
| `.bak` | Backup files | Not needed |
|
||||||
|
|
||||||
|
### Protected Folders (NEVER TOUCH)
|
||||||
|
|
||||||
|
| Folder | Reason |
|
||||||
|
|--------|--------|
|
||||||
|
| `1_setup/` | Master model files (source of truth) |
|
||||||
|
| `3_results/` | Final database, reports, best designs |
|
||||||
|
| `best_design_archive/` | Archived optimal configurations |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 2: Disk Usage Analysis
|
||||||
|
|
||||||
|
### M1_Mirror Project Baseline (Dec 2025)
|
||||||
|
|
||||||
|
```
|
||||||
|
Total: 194 GB across 28 studies, 2000+ trials
|
||||||
|
|
||||||
|
By File Type:
|
||||||
|
.op2 94 GB (48.5%) - Nastran results [ESSENTIAL]
|
||||||
|
.prt 41 GB (21.4%) - NX parts [DELETABLE]
|
||||||
|
.fem 22 GB (11.5%) - FEM mesh [DELETABLE]
|
||||||
|
.dat 22 GB (11.3%) - Solver input [DELETABLE]
|
||||||
|
.sim 9 GB (4.5%) - Simulation [DELETABLE]
|
||||||
|
.afm 5 GB (2.5%) - Assembly FEM [DELETABLE]
|
||||||
|
Other <1 GB (<1%) - Logs, configs [MIXED]
|
||||||
|
|
||||||
|
By Folder:
|
||||||
|
2_iterations/ 168 GB (87%) - Per-trial data
|
||||||
|
3_results/ 22 GB (11%) - Final results
|
||||||
|
1_setup/ 4 GB (2%) - Master models
|
||||||
|
```
|
||||||
|
|
||||||
|
### Per-Trial Breakdown (Typical V11+ Structure)
|
||||||
|
|
||||||
|
```
|
||||||
|
iter1/
|
||||||
|
assy_m1_assyfem1_sim1-solution_1.op2 68.15 MB [KEEP]
|
||||||
|
M1_Blank.prt 29.94 MB [DELETE]
|
||||||
|
assy_m1_assyfem1_sim1-solution_1.dat 15.86 MB [DELETE]
|
||||||
|
M1_Blank_fem1.fem 14.07 MB [DELETE]
|
||||||
|
ASSY_M1_assyfem1_sim1.sim 7.47 MB [DELETE]
|
||||||
|
M1_Blank_fem1_i.prt 5.20 MB [DELETE]
|
||||||
|
ASSY_M1_assyfem1.afm 4.13 MB [DELETE]
|
||||||
|
M1_Vertical_Support_Skeleton_fem1.fem 3.76 MB [DELETE]
|
||||||
|
... (logs, temps) <1.00 MB [DELETE]
|
||||||
|
_temp_part_properties.json 0.00 MB [KEEP]
|
||||||
|
-------------------------------------------------------
|
||||||
|
TOTAL: 149.67 MB
|
||||||
|
Essential only: 68.15 MB
|
||||||
|
Savings: 54.5%
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 3: Implementation
|
||||||
|
|
||||||
|
### Core Utility
|
||||||
|
|
||||||
|
**Location:** `optimization_engine/utils/study_archiver.py`
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.utils.study_archiver import (
|
||||||
|
analyze_study, # Get disk usage analysis
|
||||||
|
cleanup_study, # Remove deletable files
|
||||||
|
archive_to_remote, # Archive to dalidou
|
||||||
|
restore_from_remote, # Restore from dalidou
|
||||||
|
list_remote_archives, # List server archives
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Line Interface
|
||||||
|
|
||||||
|
**Batch Script:** `tools/archive_study.bat`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Analyze disk usage
|
||||||
|
archive_study.bat analyze studies\M1_Mirror
|
||||||
|
archive_study.bat analyze studies\M1_Mirror\m1_mirror_V12
|
||||||
|
|
||||||
|
# Cleanup completed study (dry run by default)
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
|
||||||
|
# Archive to remote server
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12 --execute --tailscale
|
||||||
|
|
||||||
|
# List remote archives
|
||||||
|
archive_study.bat list
|
||||||
|
archive_study.bat list --tailscale
|
||||||
|
|
||||||
|
# Restore from remote
|
||||||
|
archive_study.bat restore m1_mirror_V12
|
||||||
|
archive_study.bat restore m1_mirror_V12 --tailscale
|
||||||
|
```
|
||||||
|
|
||||||
|
### Python API
|
||||||
|
|
||||||
|
```python
|
||||||
|
from pathlib import Path
|
||||||
|
from optimization_engine.utils.study_archiver import (
|
||||||
|
analyze_study,
|
||||||
|
cleanup_study,
|
||||||
|
archive_to_remote,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Analyze
|
||||||
|
study_path = Path("studies/M1_Mirror/m1_mirror_V12")
|
||||||
|
analysis = analyze_study(study_path)
|
||||||
|
print(f"Total: {analysis['total_size_bytes']/1e9:.2f} GB")
|
||||||
|
print(f"Essential: {analysis['essential_size']/1e9:.2f} GB")
|
||||||
|
print(f"Deletable: {analysis['deletable_size']/1e9:.2f} GB")
|
||||||
|
|
||||||
|
# Cleanup (dry_run=False to execute)
|
||||||
|
deleted, freed = cleanup_study(study_path, dry_run=False)
|
||||||
|
print(f"Freed {freed/1e9:.2f} GB")
|
||||||
|
|
||||||
|
# Archive to server
|
||||||
|
success = archive_to_remote(study_path, use_tailscale=False, dry_run=False)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 4: Remote Server Configuration
|
||||||
|
|
||||||
|
### dalidou Server Specs
|
||||||
|
|
||||||
|
| Property | Value |
|
||||||
|
|----------|-------|
|
||||||
|
| Hostname | dalidou |
|
||||||
|
| Local IP | 192.168.86.50 |
|
||||||
|
| Tailscale IP | 100.80.199.40 |
|
||||||
|
| SSH User | papa |
|
||||||
|
| Archive Path | /srv/storage/atomizer-archive/ |
|
||||||
|
| Available Storage | 3.6 TB (SSD) + 12.7 TB (HDD) |
|
||||||
|
|
||||||
|
### First-Time Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. SSH into server and create archive directory
|
||||||
|
ssh papa@192.168.86.50
|
||||||
|
mkdir -p /srv/storage/atomizer-archive
|
||||||
|
|
||||||
|
# 2. Set up passwordless SSH (on Windows)
|
||||||
|
ssh-keygen -t ed25519 # If you don't have a key
|
||||||
|
ssh-copy-id papa@192.168.86.50
|
||||||
|
|
||||||
|
# 3. Test connection
|
||||||
|
ssh papa@192.168.86.50 "echo 'Connection OK'"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Archive Structure on Server
|
||||||
|
|
||||||
|
```
|
||||||
|
/srv/storage/atomizer-archive/
|
||||||
|
├── m1_mirror_V11_20251229.tar.gz # Compressed study archive
|
||||||
|
├── m1_mirror_V12_20251229.tar.gz
|
||||||
|
├── m1_mirror_flat_back_V3_20251229.tar.gz
|
||||||
|
└── manifest.json # Index of all archives
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 5: Recommended Workflows
|
||||||
|
|
||||||
|
### During Active Optimization
|
||||||
|
|
||||||
|
**Keep all files** - You may need to:
|
||||||
|
- Re-run specific failed trials
|
||||||
|
- Debug mesh issues
|
||||||
|
- Analyze intermediate results
|
||||||
|
|
||||||
|
### After Study Completion
|
||||||
|
|
||||||
|
1. **Generate final report** (STUDY_REPORT.md)
|
||||||
|
2. **Archive best design** to `3_results/best_design_archive/`
|
||||||
|
3. **Run cleanup:**
|
||||||
|
```bash
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
```
|
||||||
|
4. **Verify results still accessible:**
|
||||||
|
- Database queries work
|
||||||
|
- Best design files intact
|
||||||
|
- OP2 files for Zernike extraction present
|
||||||
|
|
||||||
|
### For Long-Term Storage
|
||||||
|
|
||||||
|
1. **After cleanup**, archive to server:
|
||||||
|
```bash
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
```
|
||||||
|
2. **Optionally delete local** study folder
|
||||||
|
3. **Keep only** `3_results/best_design_archive/` locally if needed
|
||||||
|
|
||||||
|
### When Revisiting Old Study
|
||||||
|
|
||||||
|
1. **Check if archived:**
|
||||||
|
```bash
|
||||||
|
archive_study.bat list
|
||||||
|
```
|
||||||
|
2. **Restore:**
|
||||||
|
```bash
|
||||||
|
archive_study.bat restore m1_mirror_V12
|
||||||
|
```
|
||||||
|
3. **If re-running trials needed**, master files in `1_setup/` allow full regeneration
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 6: Disk Space Targets
|
||||||
|
|
||||||
|
### Per-Project Guidelines
|
||||||
|
|
||||||
|
| Stage | Expected Size | Notes |
|
||||||
|
|-------|---------------|-------|
|
||||||
|
| Active (full) | 100% | All files present |
|
||||||
|
| Completed (cleaned) | ~50% | Deletables removed |
|
||||||
|
| Archived (minimal) | ~3% | Best design only locally |
|
||||||
|
|
||||||
|
### M1_Mirror Specific
|
||||||
|
|
||||||
|
| Stage | Size | Notes |
|
||||||
|
|-------|------|-------|
|
||||||
|
| Full | 194 GB | 28 studies, 2000+ trials |
|
||||||
|
| After cleanup | 114 GB | OP2 + metadata only |
|
||||||
|
| Minimal local | 5-10 GB | Best designs + database |
|
||||||
|
| Server archive | ~50 GB | Compressed |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 7: Safety Features
|
||||||
|
|
||||||
|
### Built-in Protections
|
||||||
|
|
||||||
|
1. **Dry run by default** - Must explicitly add `--execute`
|
||||||
|
2. **Master files untouched** - `1_setup/` is never modified
|
||||||
|
3. **Results preserved** - `3_results/` is never touched
|
||||||
|
4. **Essential files preserved** - OP2, JSON, NPZ always kept
|
||||||
|
5. **Archive verification** - rsync checks integrity
|
||||||
|
|
||||||
|
### What Cannot Be Recovered After Cleanup
|
||||||
|
|
||||||
|
| File Type | Recovery Method |
|
||||||
|
|-----------|-----------------|
|
||||||
|
| `.prt` | Copy from `1_setup/` + update params |
|
||||||
|
| `.fem` | Regenerate from `.prt` in NX |
|
||||||
|
| `.sim` | Recreate simulation setup |
|
||||||
|
| `.dat` | Regenerate from params.json + model |
|
||||||
|
| `.f04/.f06` | Re-run solver (if needed) |
|
||||||
|
|
||||||
|
**Note:** With `1_setup/` master files and `params.json`, ANY trial can be fully reconstructed. The only irreplaceable data is the OP2 results (which we keep).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 8: Troubleshooting
|
||||||
|
|
||||||
|
### SSH Connection Failed
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test connectivity
|
||||||
|
ping 192.168.86.50
|
||||||
|
|
||||||
|
# Test SSH
|
||||||
|
ssh papa@192.168.86.50 "echo connected"
|
||||||
|
|
||||||
|
# If on different network, use Tailscale
|
||||||
|
ssh papa@100.80.199.40 "echo connected"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Archive Upload Slow
|
||||||
|
|
||||||
|
Large studies (50+ GB) take time. Options:
|
||||||
|
- Run overnight
|
||||||
|
- Use wired LAN connection
|
||||||
|
- Pre-cleanup to reduce size
|
||||||
|
|
||||||
|
### Out of Disk Space During Archive
|
||||||
|
|
||||||
|
The archive is created locally before upload, so you need roughly 1.5x the study size in free space:
|
||||||
|
- 20 GB study = ~30 GB temp space required
|
||||||
|
|
||||||
|
### Cleanup Removed Wrong Files
|
||||||
|
|
||||||
|
If accidentally executed without dry run:
|
||||||
|
- OP2 files preserved (can still extract results)
|
||||||
|
- Master files in `1_setup/` intact
|
||||||
|
- Regenerate other files by re-running trial
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 9: Integration with Atomizer
|
||||||
|
|
||||||
|
### Protocol Reference
|
||||||
|
|
||||||
|
**Related Protocol:** `docs/protocols/operations/OP_07_DISK_OPTIMIZATION.md`
|
||||||
|
|
||||||
|
### Claude Commands
|
||||||
|
|
||||||
|
When user says:
|
||||||
|
- "analyze disk usage" → Run `analyze_study()`
|
||||||
|
- "clean up study" → Run `cleanup_study()` with confirmation
|
||||||
|
- "archive to server" → Run `archive_to_remote()`
|
||||||
|
- "restore study" → Run `restore_from_remote()`
|
||||||
|
|
||||||
|
### Automatic Suggestions
|
||||||
|
|
||||||
|
After optimization completion, suggest:
|
||||||
|
```
|
||||||
|
Optimization complete! The study is using X GB.
|
||||||
|
Would you like me to clean up regenerable files to save Y GB?
|
||||||
|
(This keeps all results but removes intermediate model copies)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 10: File Inventory
|
||||||
|
|
||||||
|
### Files Created
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `optimization_engine/utils/study_archiver.py` | Core utility module |
|
||||||
|
| `tools/archive_study.bat` | Windows batch script |
|
||||||
|
| `docs/protocols/operations/OP_07_DISK_OPTIMIZATION.md` | Full protocol |
|
||||||
|
| `.claude/skills/modules/study-disk-optimization.md` | This document |
|
||||||
|
|
||||||
|
### Dependencies
|
||||||
|
|
||||||
|
- Python 3.8+
|
||||||
|
- rsync (for remote operations, usually pre-installed)
|
||||||
|
- SSH client (for remote operations)
|
||||||
|
- Tailscale (optional, for remote access outside LAN)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix A: Cleanup Results Log (Dec 2025)
|
||||||
|
|
||||||
|
### Initial Cleanup Run
|
||||||
|
|
||||||
|
| Study | Before | After | Freed | Files Deleted |
|
||||||
|
|-------|--------|-------|-------|---------------|
|
||||||
|
| m1_mirror_cost_reduction_V11 | 32.24 GB | 15.94 GB | 16.30 GB | 3,403 |
|
||||||
|
| m1_mirror_cost_reduction_flat_back_V3 | 52.50 GB | 26.87 GB | 25.63 GB | 5,084 |
|
||||||
|
| m1_mirror_cost_reduction_flat_back_V6 | 33.71 GB | 16.64 GB | 17.08 GB | 3,391 |
|
||||||
|
| m1_mirror_cost_reduction_V12 | 22.68 GB | 10.60 GB | 12.08 GB | 2,508 |
|
||||||
|
| m1_mirror_cost_reduction_flat_back_V1 | 8.76 GB | 4.54 GB | 4.22 GB | 813 |
|
||||||
|
| m1_mirror_cost_reduction_flat_back_V5 | 8.01 GB | 4.09 GB | 3.92 GB | 765 |
|
||||||
|
| m1_mirror_cost_reduction | 3.58 GB | 3.08 GB | 0.50 GB | 267 |
|
||||||
|
| **TOTAL** | **161.48 GB** | **81.76 GB** | **79.73 GB** | **16,231** |
|
||||||
|
|
||||||
|
### Project-Wide Summary
|
||||||
|
|
||||||
|
```
|
||||||
|
Before cleanup: 193.75 GB
|
||||||
|
After cleanup: 114.03 GB
|
||||||
|
Total freed: 79.72 GB (41% reduction)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix B: Quick Reference Card
|
||||||
|
|
||||||
|
### Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Analyze
|
||||||
|
archive_study.bat analyze <path>
|
||||||
|
|
||||||
|
# Cleanup (always dry-run first!)
|
||||||
|
archive_study.bat cleanup <study> # Dry run
|
||||||
|
archive_study.bat cleanup <study> --execute # Execute
|
||||||
|
|
||||||
|
# Archive
|
||||||
|
archive_study.bat archive <study> --execute
|
||||||
|
archive_study.bat archive <study> --execute --tailscale
|
||||||
|
|
||||||
|
# Remote
|
||||||
|
archive_study.bat list
|
||||||
|
archive_study.bat restore <name>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Python
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.utils.study_archiver import *
|
||||||
|
|
||||||
|
# Quick analysis
|
||||||
|
analysis = analyze_study(Path("studies/M1_Mirror"))
|
||||||
|
print(f"Deletable: {analysis['deletable_size']/1e9:.2f} GB")
|
||||||
|
|
||||||
|
# Cleanup
|
||||||
|
cleanup_study(Path("studies/M1_Mirror/m1_mirror_V12"), dry_run=False)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Server Access
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Local
|
||||||
|
ssh papa@192.168.86.50
|
||||||
|
|
||||||
|
# Remote (Tailscale)
|
||||||
|
ssh papa@100.80.199.40
|
||||||
|
|
||||||
|
# Archive location
|
||||||
|
/srv/storage/atomizer-archive/
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*This module enables efficient disk space management for large-scale FEA optimization studies.*
|
||||||
249
.claude/skills/modules/study-interview-mode.md
Normal file
249
.claude/skills/modules/study-interview-mode.md
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
# Study Interview Mode Skill
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
|
||||||
|
This skill enables an intelligent interview-based approach to study creation. Instead of asking users to fill out complex configuration files, Claude guides them through a natural conversation to gather all necessary information for optimization study setup.
|
||||||
|
|
||||||
|
**This is now the DEFAULT mode for all study creation.**
|
||||||
|
|
||||||
|
## Triggers (DEFAULT for Study Creation)
|
||||||
|
|
||||||
|
This skill is automatically invoked when the user says ANY of:
|
||||||
|
- "create a study", "new study", "set up study"
|
||||||
|
- "create a study for my bracket"
|
||||||
|
- "optimize this", "optimize my model"
|
||||||
|
- "I want to minimize mass", "I want to reduce weight"
|
||||||
|
- Any study creation request
|
||||||
|
|
||||||
|
### Skip Interview (Manual Mode)
|
||||||
|
|
||||||
|
Only skip to manual mode when user explicitly requests:
|
||||||
|
- "skip interview", "quick setup", "manual config"
|
||||||
|
- Power users recreating known configurations
|
||||||
|
|
||||||
|
## Interview Flow
|
||||||
|
|
||||||
|
### Phase 1: Introspection
|
||||||
|
Before questions begin, automatically analyze the NX model:
|
||||||
|
```python
|
||||||
|
from optimization_engine.interview import StudyInterviewEngine
|
||||||
|
|
||||||
|
engine = StudyInterviewEngine(study_path)
|
||||||
|
|
||||||
|
# Run introspection first (if model available)
|
||||||
|
introspection = {
|
||||||
|
"expressions": [...], # From part introspection
|
||||||
|
"materials": [...], # From simulation
|
||||||
|
"load_cases": [...], # From simulation
|
||||||
|
"model_path": "...",
|
||||||
|
"sim_path": "..."
|
||||||
|
}
|
||||||
|
|
||||||
|
session = engine.start_interview(study_name, introspection=introspection)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Problem Definition
|
||||||
|
Ask about the study's purpose and context:
|
||||||
|
- What are you trying to optimize?
|
||||||
|
- Describe your study in a few words
|
||||||
|
|
||||||
|
### Phase 3: Objectives
|
||||||
|
Determine optimization goals:
|
||||||
|
- Primary goal (minimize mass, stress, displacement, etc.)
|
||||||
|
- Secondary objectives (if any)
|
||||||
|
- Multi-objective or single-objective?
|
||||||
|
|
||||||
|
The ExtractorMapper automatically assigns extractors:
|
||||||
|
- Mass → E4 (BDF Mass) or E5 (CAD Mass)
|
||||||
|
- Displacement → E1
|
||||||
|
- Stress → E3
|
||||||
|
- Frequency → E2
|
||||||
|
- Zernike → E8, E9, E10
|
||||||
|
|
||||||
|
### Phase 4: Constraints
|
||||||
|
Define physical limits:
|
||||||
|
- Material-aware validation (checks against yield stress)
|
||||||
|
- Auto-suggests safety factors
|
||||||
|
- Detects anti-patterns (e.g., mass minimization without constraints)
|
||||||
|
|
||||||
|
### Phase 5: Design Variables
|
||||||
|
Select parameters to vary:
|
||||||
|
- Dynamic options from introspection
|
||||||
|
- Auto-suggests bounds based on current values
|
||||||
|
- Detects too-wide or too-narrow bounds
|
||||||
|
|
||||||
|
### Phase 6: Validation
|
||||||
|
Final checks before generation:
|
||||||
|
- Run baseline simulation (optional)
|
||||||
|
- Verify all parameters accessible
|
||||||
|
- Check for conflicting constraints
|
||||||
|
|
||||||
|
### Phase 7: Review
|
||||||
|
Present StudyBlueprint for confirmation:
|
||||||
|
- Show all settings in readable format
|
||||||
|
- Allow what-if modifications
|
||||||
|
- Confirm or restart
|
||||||
|
|
||||||
|
## Key Classes
|
||||||
|
|
||||||
|
### StudyInterviewEngine
|
||||||
|
Main orchestrator:
|
||||||
|
```python
|
||||||
|
from optimization_engine.interview import StudyInterviewEngine
|
||||||
|
|
||||||
|
engine = StudyInterviewEngine(study_path)
|
||||||
|
session = engine.start_interview(study_name, introspection=introspection)
|
||||||
|
|
||||||
|
# Get first question
|
||||||
|
action = engine.get_first_question()
|
||||||
|
# Present action.message to user
|
||||||
|
|
||||||
|
# Process user answer
|
||||||
|
next_action = engine.process_answer(user_response)
|
||||||
|
|
||||||
|
# When complete, get blueprint
|
||||||
|
if next_action.action_type == "show_summary":
|
||||||
|
blueprint = next_action.blueprint
|
||||||
|
```
|
||||||
|
|
||||||
|
### InterviewState
|
||||||
|
Persisted interview state with JSON serialization:
|
||||||
|
```python
|
||||||
|
from optimization_engine.interview import InterviewState, InterviewStateManager
|
||||||
|
|
||||||
|
manager = InterviewStateManager(study_path)
|
||||||
|
state = manager.load_state() # Resume if exists
|
||||||
|
```
|
||||||
|
|
||||||
|
### StudyBlueprint
|
||||||
|
Validated configuration ready for generation:
|
||||||
|
```python
|
||||||
|
blueprint = engine.generate_blueprint()
|
||||||
|
config = blueprint.to_config_json() # For optimization_config.json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Anti-Pattern Detection
|
||||||
|
|
||||||
|
The EngineeringValidator detects common mistakes:
|
||||||
|
- `mass_no_constraint`: Mass minimization without stress/displacement limits
|
||||||
|
- `stress_over_yield`: Stress constraint exceeds material yield
|
||||||
|
- `bounds_too_wide`: Design variable range > 100x
|
||||||
|
- `too_many_objectives`: More than 3 objectives
|
||||||
|
- `single_dv_many_trials`: Many trials for single variable
|
||||||
|
|
||||||
|
When an anti-pattern is detected, the user is warned and asked to acknowledge before proceeding.
|
||||||
|
|
||||||
|
## Materials Database
|
||||||
|
|
||||||
|
Built-in materials with properties:
|
||||||
|
- Aluminum alloys (6061-T6, 7075-T6)
|
||||||
|
- Steel grades (A36, 304 SS, 316 SS)
|
||||||
|
- Titanium (Ti-6Al-4V)
|
||||||
|
- Composites (CFRP, GFRP)
|
||||||
|
- Plastics (ABS, Nylon)
|
||||||
|
|
||||||
|
Fuzzy matching supports user input like "Al 6061", "aluminum", "6061-T6".
|
||||||
|
|
||||||
|
## Presenter Modes
|
||||||
|
|
||||||
|
### ClaudePresenter (Default)
|
||||||
|
Markdown-formatted for Claude conversation:
|
||||||
|
```markdown
|
||||||
|
### Question 1 of ~12: Problem Definition
|
||||||
|
|
||||||
|
What are you trying to optimize?
|
||||||
|
|
||||||
|
1. Minimize mass/weight
|
||||||
|
2. Minimize maximum stress
|
||||||
|
3. Minimize displacement
|
||||||
|
4. Maximize natural frequency
|
||||||
|
```
|
||||||
|
|
||||||
|
### DashboardPresenter
|
||||||
|
JSON events for React dashboard (future).
|
||||||
|
|
||||||
|
### CLIPresenter
|
||||||
|
Plain text for command-line interface (future).
|
||||||
|
|
||||||
|
## State Persistence
|
||||||
|
|
||||||
|
Interview state is saved after each answer:
|
||||||
|
```
|
||||||
|
studies/my_study/
|
||||||
|
├── .interview/
|
||||||
|
│ ├── state.json # Current state
|
||||||
|
│ ├── state.lock # Concurrency lock
|
||||||
|
│ ├── interview_log.md # Audit trail
|
||||||
|
│ └── backups/ # State history
|
||||||
|
│ ├── state_20260102_103000.json
|
||||||
|
│ └── ...
|
||||||
|
```
|
||||||
|
|
||||||
|
Supports resume after session interruption.
|
||||||
|
|
||||||
|
## Integration with Study Wizard
|
||||||
|
|
||||||
|
After interview completes:
|
||||||
|
```python
|
||||||
|
from optimization_engine.study.wizard import StudyWizard
|
||||||
|
|
||||||
|
wizard = StudyWizard.from_interview(blueprint, study_path)
|
||||||
|
wizard.generate()
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example Conversation
|
||||||
|
|
||||||
|
**Claude**: Let me help you set up your optimization study. I'll ask a few questions to understand what you're trying to achieve.
|
||||||
|
|
||||||
|
**Claude**: ### Question 1 of ~12: Problem Definition
|
||||||
|
What are you trying to optimize? (You can describe in your own words)
|
||||||
|
|
||||||
|
**User**: I want to make this bracket as light as possible
|
||||||
|
|
||||||
|
**Claude**: Got it - mass minimization. Let me check the material...
|
||||||
|
|
||||||
|
**Claude**: ### Question 2 of ~12: Constraints
|
||||||
|
Your model uses Aluminum 6061-T6 (yield stress: 276 MPa).
|
||||||
|
What's the maximum allowable stress? I recommend staying below 184 MPa (SF=1.5).
|
||||||
|
|
||||||
|
**User**: 200 MPa should be fine
|
||||||
|
|
||||||
|
**Claude**: I notice 200 MPa is close to the yield stress (276 MPa), leaving only a small safety margin (SF ≈ 1.4) against yielding.
|
||||||
|
Do you want to proceed with this value? [Yes/No]
|
||||||
|
|
||||||
|
**User**: Yes, proceed
|
||||||
|
|
||||||
|
*(Interview continues...)*
|
||||||
|
|
||||||
|
**Claude**: ### Summary
|
||||||
|
Here's your optimization setup:
|
||||||
|
|
||||||
|
| Setting | Value |
|
||||||
|
|---------|-------|
|
||||||
|
| Study Name | bracket_v1 |
|
||||||
|
| Objective | Minimize mass (E4) |
|
||||||
|
| Constraint | Max stress ≤ 200 MPa |
|
||||||
|
| Design Variables | thickness [1-10 mm], rib_height [5-20 mm] |
|
||||||
|
| Trials | 100 |
|
||||||
|
| Sampler | TPE |
|
||||||
|
|
||||||
|
Ready to generate? [Confirm/Modify/Restart]
|
||||||
|
|
||||||
|
## Files Created
|
||||||
|
|
||||||
|
After confirmation, the following are generated:
|
||||||
|
- `optimization_config.json` - From blueprint.to_config_json()
|
||||||
|
- `run_optimization.py` - From SYS_10 or SYS_11 template
|
||||||
|
- `INTERVIEW_LOG.md` - Audit trail of Q&A
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
If the interview is interrupted:
|
||||||
|
1. State is auto-saved after each answer
|
||||||
|
2. On restart, detect existing state
|
||||||
|
3. Offer to resume or start fresh
|
||||||
|
|
||||||
|
If validation fails:
|
||||||
|
1. Present error with fix suggestion
|
||||||
|
2. Ask user to acknowledge or modify
|
||||||
|
3. Don't block on warnings, only on errors
|
||||||
61
CLAUDE.md
61
CLAUDE.md
@@ -32,7 +32,8 @@ If working directory is inside a study (`studies/*/`):
|
|||||||
|
|
||||||
| User Keywords | Load Protocol | Subagent Type |
|
| User Keywords | Load Protocol | Subagent Type |
|
||||||
|---------------|---------------|---------------|
|
|---------------|---------------|---------------|
|
||||||
| "create", "new", "set up" | **READ** OP_01 first, then execute | general-purpose |
|
| "create", "new", "set up", "create a study" | **READ** OP_01 + **modules/study-interview-mode.md** (DEFAULT) | general-purpose |
|
||||||
|
| "quick setup", "skip interview", "manual" | **READ** OP_01 + core/study-creation-core.md | general-purpose |
|
||||||
| "run", "start", "trials" | **READ** OP_02 first | - (direct execution) |
|
| "run", "start", "trials" | **READ** OP_02 first | - (direct execution) |
|
||||||
| "status", "progress" | OP_03 | - (DB query) |
|
| "status", "progress" | OP_03 | - (DB query) |
|
||||||
| "results", "analyze", "Pareto" | OP_04 | - (analysis) |
|
| "results", "analyze", "Pareto" | OP_04 | - (analysis) |
|
||||||
@@ -84,12 +85,14 @@ The Protocol Operating System (POS) provides layered documentation:
|
|||||||
|
|
||||||
| Task | Protocol | Key File |
|
| Task | Protocol | Key File |
|
||||||
|------|----------|----------|
|
|------|----------|----------|
|
||||||
| Create study | OP_01 | `docs/protocols/operations/OP_01_CREATE_STUDY.md` |
|
| **Create study (Interview Mode - DEFAULT)** | OP_01 | `.claude/skills/modules/study-interview-mode.md` |
|
||||||
|
| Create study (Manual) | OP_01 | `docs/protocols/operations/OP_01_CREATE_STUDY.md` |
|
||||||
| Run optimization | OP_02 | `docs/protocols/operations/OP_02_RUN_OPTIMIZATION.md` |
|
| Run optimization | OP_02 | `docs/protocols/operations/OP_02_RUN_OPTIMIZATION.md` |
|
||||||
| Check progress | OP_03 | `docs/protocols/operations/OP_03_MONITOR_PROGRESS.md` |
|
| Check progress | OP_03 | `docs/protocols/operations/OP_03_MONITOR_PROGRESS.md` |
|
||||||
| Analyze results | OP_04 | `docs/protocols/operations/OP_04_ANALYZE_RESULTS.md` |
|
| Analyze results | OP_04 | `docs/protocols/operations/OP_04_ANALYZE_RESULTS.md` |
|
||||||
| Export neural data | OP_05 | `docs/protocols/operations/OP_05_EXPORT_TRAINING_DATA.md` |
|
| Export neural data | OP_05 | `docs/protocols/operations/OP_05_EXPORT_TRAINING_DATA.md` |
|
||||||
| Debug issues | OP_06 | `docs/protocols/operations/OP_06_TROUBLESHOOT.md` |
|
| Debug issues | OP_06 | `docs/protocols/operations/OP_06_TROUBLESHOOT.md` |
|
||||||
|
| **Free disk space** | OP_07 | `docs/protocols/operations/OP_07_DISK_OPTIMIZATION.md` |
|
||||||
|
|
||||||
## System Protocols (Technical Specs)
|
## System Protocols (Technical Specs)
|
||||||
|
|
||||||
@@ -129,24 +132,70 @@ C:\Users\antoi\anaconda3\envs\atomizer\python.exe your_script.py
|
|||||||
- Create new virtual environments
|
- Create new virtual environments
|
||||||
- Use system Python
|
- Use system Python
|
||||||
|
|
||||||
|
## Git Configuration
|
||||||
|
|
||||||
|
**CRITICAL: Always push to BOTH remotes when committing.**
|
||||||
|
|
||||||
|
```
|
||||||
|
origin: http://192.168.86.50:3000/Antoine/Atomizer.git (Gitea - local)
|
||||||
|
github: https://github.com/Anto01/Atomizer.git (GitHub - private)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Push Commands
|
||||||
|
```bash
|
||||||
|
# Push to both remotes
|
||||||
|
git push origin main && git push github main
|
||||||
|
|
||||||
|
# Or use --all to push to all remotes
|
||||||
|
git remote | xargs -L1 git push --all
|
||||||
|
```
|
||||||
|
|
||||||
## Key Directories
|
## Key Directories
|
||||||
|
|
||||||
```
|
```
|
||||||
Atomizer/
|
Atomizer/
|
||||||
├── .claude/skills/ # LLM skills (Bootstrap + Core + Modules)
|
├── .claude/skills/ # LLM skills (Bootstrap + Core + Modules)
|
||||||
├── docs/protocols/ # Protocol Operating System
|
├── docs/protocols/ # Protocol Operating System
|
||||||
│ ├── operations/ # OP_01 - OP_06
|
│ ├── operations/ # OP_01 - OP_07
|
||||||
│ ├── system/ # SYS_10 - SYS_15
|
│ ├── system/ # SYS_10 - SYS_15
|
||||||
│ └── extensions/ # EXT_01 - EXT_04
|
│ └── extensions/ # EXT_01 - EXT_04
|
||||||
├── optimization_engine/ # Core Python modules
|
├── optimization_engine/ # Core Python modules (v2.0)
|
||||||
│ ├── extractors/ # Physics extraction library
|
│ ├── core/ # Optimization runners, method_selector, gradient_optimizer
|
||||||
|
│ ├── nx/ # NX/Nastran integration (solver, updater, session_manager)
|
||||||
|
│ ├── study/ # Study management (creator, wizard, state, reset)
|
||||||
|
│ ├── config/ # Configuration (manager, builder, setup_wizard)
|
||||||
|
│ ├── reporting/ # Reports (visualizer, markdown_report, landscape_analyzer)
|
||||||
|
│ ├── processors/ # Data processing
|
||||||
|
│ │ └── surrogates/ # Neural network surrogates
|
||||||
|
│ ├── extractors/ # Physics extraction library (unchanged)
|
||||||
│ ├── gnn/ # GNN surrogate module (Zernike)
|
│ ├── gnn/ # GNN surrogate module (Zernike)
|
||||||
│ └── utils/ # Utilities (dashboard_db, trial_manager)
|
│ ├── utils/ # Utilities (dashboard_db, trial_manager, study_archiver)
|
||||||
|
│ └── validators/ # Validation (unchanged)
|
||||||
├── studies/ # User studies
|
├── studies/ # User studies
|
||||||
|
├── tools/ # CLI tools (archive_study.bat, zernike_html_generator.py)
|
||||||
├── archive/ # Deprecated code (for reference)
|
├── archive/ # Deprecated code (for reference)
|
||||||
└── atomizer-dashboard/ # React dashboard
|
└── atomizer-dashboard/ # React dashboard
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Import Migration (v2.0)
|
||||||
|
Old imports still work with deprecation warnings. New paths:
|
||||||
|
```python
|
||||||
|
# Core
|
||||||
|
from optimization_engine.core.runner import OptimizationRunner
|
||||||
|
from optimization_engine.core.intelligent_optimizer import IMSO
|
||||||
|
from optimization_engine.core.gradient_optimizer import GradientOptimizer
|
||||||
|
|
||||||
|
# NX Integration
|
||||||
|
from optimization_engine.nx.solver import NXSolver
|
||||||
|
from optimization_engine.nx.updater import NXParameterUpdater
|
||||||
|
|
||||||
|
# Study Management
|
||||||
|
from optimization_engine.study.creator import StudyCreator
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
from optimization_engine.config.manager import ConfigManager
|
||||||
|
```
|
||||||
|
|
||||||
## GNN Surrogate for Zernike Optimization
|
## GNN Surrogate for Zernike Optimization
|
||||||
|
|
||||||
The `optimization_engine/gnn/` module provides Graph Neural Network surrogates for mirror optimization:
|
The `optimization_engine/gnn/` module provides Graph Neural Network surrogates for mirror optimization:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ This extractor reads expressions using the .exp export method for accuracy.
|
|||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Dict, Any
|
from typing import Dict, Any
|
||||||
from optimization_engine.nx_updater import NXParameterUpdater
|
from optimization_engine.nx.updater import NXParameterUpdater
|
||||||
|
|
||||||
|
|
||||||
def extract_expression(prt_file: Path, expression_name: str):
|
def extract_expression(prt_file: Path, expression_name: str):
|
||||||
|
|||||||
@@ -228,11 +228,11 @@ from pathlib import Path
|
|||||||
# Add optimization engine to path
|
# Add optimization engine to path
|
||||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||||
|
|
||||||
from optimization_engine.intelligent_optimizer import IntelligentOptimizer
|
from optimization_engine.core.intelligent_optimizer import IntelligentOptimizer
|
||||||
from optimization_engine.nx_updater import NXParameterUpdater
|
from optimization_engine.nx.updater import NXParameterUpdater
|
||||||
from optimization_engine.nx_solver import NXSolver
|
from optimization_engine.nx.solver import NXSolver
|
||||||
from optimization_engine.extractors.frequency_extractor import extract_first_frequency
|
from optimization_engine.extractors.frequency_extractor import extract_first_frequency
|
||||||
from optimization_engine.generate_report_markdown import generate_markdown_report
|
from optimization_engine.reporting.markdown_report import generate_markdown_report
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ import matplotlib.pyplot as plt
|
|||||||
project_root = Path(__file__).parent
|
project_root = Path(__file__).parent
|
||||||
sys.path.insert(0, str(project_root))
|
sys.path.insert(0, str(project_root))
|
||||||
|
|
||||||
from optimization_engine.active_learning_surrogate import (
|
from optimization_engine.processors.surrogates.active_learning_surrogate import (
|
||||||
ActiveLearningSurrogate,
|
ActiveLearningSurrogate,
|
||||||
extract_training_data_from_study
|
extract_training_data_from_study
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ project_root = Path(__file__).parent
|
|||||||
sys.path.insert(0, str(project_root))
|
sys.path.insert(0, str(project_root))
|
||||||
sys.path.insert(0, str(project_root / 'atomizer-field'))
|
sys.path.insert(0, str(project_root / 'atomizer-field'))
|
||||||
|
|
||||||
from optimization_engine.simple_mlp_surrogate import SimpleSurrogate
|
from optimization_engine.processors.surrogates.simple_mlp_surrogate import SimpleSurrogate
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ def load_config_bounds(study_path: Path) -> dict:
|
|||||||
|
|
||||||
return bounds
|
return bounds
|
||||||
|
|
||||||
from optimization_engine.active_learning_surrogate import EnsembleMLP
|
from optimization_engine.processors.surrogates.active_learning_surrogate import EnsembleMLP
|
||||||
|
|
||||||
|
|
||||||
class ValidatedSurrogate:
|
class ValidatedSurrogate:
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ import matplotlib.pyplot as plt
|
|||||||
project_root = Path(__file__).parent
|
project_root = Path(__file__).parent
|
||||||
sys.path.insert(0, str(project_root))
|
sys.path.insert(0, str(project_root))
|
||||||
|
|
||||||
from optimization_engine.active_learning_surrogate import (
|
from optimization_engine.processors.surrogates.active_learning_surrogate import (
|
||||||
EnsembleMLP,
|
EnsembleMLP,
|
||||||
extract_training_data_from_study
|
extract_training_data_from_study
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ import optuna
|
|||||||
project_root = Path(__file__).parent
|
project_root = Path(__file__).parent
|
||||||
sys.path.insert(0, str(project_root))
|
sys.path.insert(0, str(project_root))
|
||||||
|
|
||||||
from optimization_engine.simple_mlp_surrogate import SimpleSurrogate
|
from optimization_engine.processors.surrogates.simple_mlp_surrogate import SimpleSurrogate
|
||||||
|
|
||||||
def load_fea_data_from_database(db_path: str, study_name: str):
|
def load_fea_data_from_database(db_path: str, study_name: str):
|
||||||
"""Load actual FEA results from database for comparison."""
|
"""Load actual FEA results from database for comparison."""
|
||||||
|
|||||||
@@ -12,8 +12,8 @@ Expected behavior:
|
|||||||
import numpy as np
|
import numpy as np
|
||||||
import optuna
|
import optuna
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from optimization_engine.adaptive_characterization import CharacterizationStoppingCriterion
|
from optimization_engine.processors.adaptive_characterization import CharacterizationStoppingCriterion
|
||||||
from optimization_engine.landscape_analyzer import LandscapeAnalyzer
|
from optimization_engine.reporting.landscape_analyzer import LandscapeAnalyzer
|
||||||
|
|
||||||
|
|
||||||
def simple_smooth_function(trial):
|
def simple_smooth_function(trial):
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
"""Test neural surrogate integration."""
|
"""Test neural surrogate integration."""
|
||||||
|
|
||||||
import time
|
import time
|
||||||
from optimization_engine.neural_surrogate import create_surrogate_for_study
|
from optimization_engine.processors.surrogates.neural_surrogate import create_surrogate_for_study
|
||||||
|
|
||||||
print("Testing Neural Surrogate Integration")
|
print("Testing Neural Surrogate Integration")
|
||||||
print("=" * 60)
|
print("=" * 60)
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ project_root = Path(__file__).parent
|
|||||||
sys.path.insert(0, str(project_root))
|
sys.path.insert(0, str(project_root))
|
||||||
sys.path.insert(0, str(project_root / 'atomizer-field'))
|
sys.path.insert(0, str(project_root / 'atomizer-field'))
|
||||||
|
|
||||||
from optimization_engine.neural_surrogate import create_parametric_surrogate_for_study
|
from optimization_engine.processors.surrogates.neural_surrogate import create_parametric_surrogate_for_study
|
||||||
|
|
||||||
# Create surrogate
|
# Create surrogate
|
||||||
print("Creating parametric surrogate...")
|
print("Creating parametric surrogate...")
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
"""Test parametric surrogate integration."""
|
"""Test parametric surrogate integration."""
|
||||||
|
|
||||||
import time
|
import time
|
||||||
from optimization_engine.neural_surrogate import create_parametric_surrogate_for_study
|
from optimization_engine.processors.surrogates.neural_surrogate import create_parametric_surrogate_for_study
|
||||||
|
|
||||||
print("Testing Parametric Neural Surrogate")
|
print("Testing Parametric Neural Surrogate")
|
||||||
print("=" * 60)
|
print("=" * 60)
|
||||||
|
|||||||
@@ -117,7 +117,7 @@ from pathlib import Path
|
|||||||
# Add parent directory to path
|
# Add parent directory to path
|
||||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||||
|
|
||||||
from optimization_engine.runner import OptimizationRunner
|
from optimization_engine.core.runner import OptimizationRunner
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Run the optimization."""
|
"""Run the optimization."""
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import sys
|
|||||||
# Add parent directory to path to import optimization_engine
|
# Add parent directory to path to import optimization_engine
|
||||||
sys.path.append(str(Path(__file__).parent.parent.parent.parent))
|
sys.path.append(str(Path(__file__).parent.parent.parent.parent))
|
||||||
|
|
||||||
from api.routes import optimization, claude, terminal, insights
|
from api.routes import optimization, claude, terminal, insights, context
|
||||||
from api.websocket import optimization_stream
|
from api.websocket import optimization_stream
|
||||||
|
|
||||||
# Create FastAPI app
|
# Create FastAPI app
|
||||||
@@ -37,6 +37,7 @@ app.include_router(optimization_stream.router, prefix="/api/ws", tags=["websocke
|
|||||||
app.include_router(claude.router, prefix="/api/claude", tags=["claude"])
|
app.include_router(claude.router, prefix="/api/claude", tags=["claude"])
|
||||||
app.include_router(terminal.router, prefix="/api/terminal", tags=["terminal"])
|
app.include_router(terminal.router, prefix="/api/terminal", tags=["terminal"])
|
||||||
app.include_router(insights.router, prefix="/api/insights", tags=["insights"])
|
app.include_router(insights.router, prefix="/api/insights", tags=["insights"])
|
||||||
|
app.include_router(context.router, prefix="/api/context", tags=["context"])
|
||||||
|
|
||||||
@app.get("/")
|
@app.get("/")
|
||||||
async def root():
|
async def root():
|
||||||
|
|||||||
450
atomizer-dashboard/backend/api/routes/context.py
Normal file
450
atomizer-dashboard/backend/api/routes/context.py
Normal file
@@ -0,0 +1,450 @@
|
|||||||
|
"""
|
||||||
|
Context Engineering API Routes
|
||||||
|
|
||||||
|
Provides endpoints for:
|
||||||
|
- Viewing playbook contents
|
||||||
|
- Managing session state
|
||||||
|
- Recording feedback on playbook items
|
||||||
|
- Triggering compaction
|
||||||
|
- Monitoring cache efficiency
|
||||||
|
- Exporting learning reports
|
||||||
|
|
||||||
|
Part of the ACE (Agentic Context Engineering) implementation for Atomizer.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from fastapi import APIRouter, HTTPException, Query
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional, List
|
||||||
|
from pydantic import BaseModel
|
||||||
|
from datetime import datetime
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Add parent paths for imports
|
||||||
|
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
|
||||||
|
|
||||||
|
router = APIRouter()
|
||||||
|
|
||||||
|
# Paths
|
||||||
|
ATOMIZER_ROOT = Path(__file__).parents[4]
|
||||||
|
PLAYBOOK_PATH = ATOMIZER_ROOT / "knowledge_base" / "playbook.json"
|
||||||
|
|
||||||
|
|
||||||
|
# Pydantic models for request/response
|
||||||
|
class PlaybookItemResponse(BaseModel):
|
||||||
|
id: str
|
||||||
|
category: str
|
||||||
|
content: str
|
||||||
|
helpful_count: int
|
||||||
|
harmful_count: int
|
||||||
|
net_score: int
|
||||||
|
confidence: float
|
||||||
|
tags: List[str]
|
||||||
|
created_at: str
|
||||||
|
last_used: Optional[str]
|
||||||
|
|
||||||
|
|
||||||
|
class PlaybookSummary(BaseModel):
|
||||||
|
total_items: int
|
||||||
|
by_category: dict
|
||||||
|
version: int
|
||||||
|
last_updated: str
|
||||||
|
avg_score: float
|
||||||
|
top_score: int
|
||||||
|
lowest_score: int
|
||||||
|
|
||||||
|
|
||||||
|
class FeedbackRequest(BaseModel):
|
||||||
|
item_id: str
|
||||||
|
helpful: bool
|
||||||
|
|
||||||
|
|
||||||
|
class InsightRequest(BaseModel):
|
||||||
|
category: str
|
||||||
|
content: str
|
||||||
|
tags: Optional[List[str]] = None
|
||||||
|
source_trial: Optional[int] = None
|
||||||
|
|
||||||
|
|
||||||
|
class SessionStateResponse(BaseModel):
|
||||||
|
session_id: str
|
||||||
|
task_type: Optional[str]
|
||||||
|
study_name: Optional[str]
|
||||||
|
study_status: str
|
||||||
|
trials_completed: int
|
||||||
|
trials_total: int
|
||||||
|
best_value: Optional[float]
|
||||||
|
recent_actions: List[str]
|
||||||
|
recent_errors: List[str]
|
||||||
|
|
||||||
|
|
||||||
|
# Helper function to get playbook
|
||||||
|
def get_playbook():
|
||||||
|
"""Load playbook, handling import errors gracefully."""
|
||||||
|
try:
|
||||||
|
from optimization_engine.context.playbook import AtomizerPlaybook
|
||||||
|
return AtomizerPlaybook.load(PLAYBOOK_PATH)
|
||||||
|
except ImportError as e:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=500,
|
||||||
|
detail=f"Context engineering module not available: {str(e)}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Playbook endpoints
|
||||||
|
@router.get("/playbook", response_model=PlaybookSummary)
|
||||||
|
async def get_playbook_summary():
|
||||||
|
"""Get playbook summary statistics."""
|
||||||
|
playbook = get_playbook()
|
||||||
|
stats = playbook.get_stats()
|
||||||
|
|
||||||
|
return PlaybookSummary(
|
||||||
|
total_items=stats["total_items"],
|
||||||
|
by_category=stats["by_category"],
|
||||||
|
version=stats["version"],
|
||||||
|
last_updated=stats["last_updated"],
|
||||||
|
avg_score=stats["avg_score"],
|
||||||
|
top_score=stats["max_score"],
|
||||||
|
lowest_score=stats["min_score"]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/playbook/items", response_model=List[PlaybookItemResponse])
|
||||||
|
async def get_playbook_items(
|
||||||
|
category: Optional[str] = Query(None, description="Filter by category (str, mis, tool, etc.)"),
|
||||||
|
min_score: int = Query(0, description="Minimum net score"),
|
||||||
|
min_confidence: float = Query(0.0, description="Minimum confidence (0.0-1.0)"),
|
||||||
|
limit: int = Query(50, description="Maximum items to return"),
|
||||||
|
offset: int = Query(0, description="Pagination offset")
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Get playbook items with optional filtering.
|
||||||
|
|
||||||
|
Categories:
|
||||||
|
- str: Strategy
|
||||||
|
- mis: Mistake
|
||||||
|
- tool: Tool usage
|
||||||
|
- cal: Calculation
|
||||||
|
- dom: Domain knowledge
|
||||||
|
- wf: Workflow
|
||||||
|
"""
|
||||||
|
playbook = get_playbook()
|
||||||
|
|
||||||
|
items = list(playbook.items.values())
|
||||||
|
|
||||||
|
# Filter by category
|
||||||
|
if category:
|
||||||
|
try:
|
||||||
|
from optimization_engine.context.playbook import InsightCategory
|
||||||
|
cat = InsightCategory(category)
|
||||||
|
items = [i for i in items if i.category == cat]
|
||||||
|
except ValueError:
|
||||||
|
raise HTTPException(400, f"Invalid category: {category}. Valid: str, mis, tool, cal, dom, wf")
|
||||||
|
|
||||||
|
# Filter by score
|
||||||
|
items = [i for i in items if i.net_score >= min_score]
|
||||||
|
|
||||||
|
# Filter by confidence
|
||||||
|
items = [i for i in items if i.confidence >= min_confidence]
|
||||||
|
|
||||||
|
# Sort by score
|
||||||
|
items.sort(key=lambda x: x.net_score, reverse=True)
|
||||||
|
|
||||||
|
# Paginate
|
||||||
|
items = items[offset:offset + limit]
|
||||||
|
|
||||||
|
return [
|
||||||
|
PlaybookItemResponse(
|
||||||
|
id=item.id,
|
||||||
|
category=item.category.value,
|
||||||
|
content=item.content,
|
||||||
|
helpful_count=item.helpful_count,
|
||||||
|
harmful_count=item.harmful_count,
|
||||||
|
net_score=item.net_score,
|
||||||
|
confidence=item.confidence,
|
||||||
|
tags=item.tags,
|
||||||
|
created_at=item.created_at,
|
||||||
|
last_used=item.last_used
|
||||||
|
)
|
||||||
|
for item in items
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/playbook/items/{item_id}", response_model=PlaybookItemResponse)
|
||||||
|
async def get_playbook_item(item_id: str):
|
||||||
|
"""Get a specific playbook item by ID."""
|
||||||
|
playbook = get_playbook()
|
||||||
|
|
||||||
|
if item_id not in playbook.items:
|
||||||
|
raise HTTPException(404, f"Item not found: {item_id}")
|
||||||
|
|
||||||
|
item = playbook.items[item_id]
|
||||||
|
|
||||||
|
return PlaybookItemResponse(
|
||||||
|
id=item.id,
|
||||||
|
category=item.category.value,
|
||||||
|
content=item.content,
|
||||||
|
helpful_count=item.helpful_count,
|
||||||
|
harmful_count=item.harmful_count,
|
||||||
|
net_score=item.net_score,
|
||||||
|
confidence=item.confidence,
|
||||||
|
tags=item.tags,
|
||||||
|
created_at=item.created_at,
|
||||||
|
last_used=item.last_used
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/playbook/feedback")
|
||||||
|
async def record_feedback(request: FeedbackRequest):
|
||||||
|
"""
|
||||||
|
Record feedback on a playbook item.
|
||||||
|
|
||||||
|
This is how the system learns:
|
||||||
|
- helpful=true increases the item's score
|
||||||
|
- helpful=false decreases the item's score
|
||||||
|
"""
|
||||||
|
playbook = get_playbook()
|
||||||
|
|
||||||
|
if request.item_id not in playbook.items:
|
||||||
|
raise HTTPException(404, f"Item not found: {request.item_id}")
|
||||||
|
|
||||||
|
playbook.record_outcome(request.item_id, helpful=request.helpful)
|
||||||
|
playbook.save(PLAYBOOK_PATH)
|
||||||
|
|
||||||
|
item = playbook.items[request.item_id]
|
||||||
|
|
||||||
|
return {
|
||||||
|
"item_id": request.item_id,
|
||||||
|
"new_score": item.net_score,
|
||||||
|
"new_confidence": item.confidence,
|
||||||
|
"helpful_count": item.helpful_count,
|
||||||
|
"harmful_count": item.harmful_count
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/playbook/insights")
|
||||||
|
async def add_insight(request: InsightRequest):
|
||||||
|
"""
|
||||||
|
Add a new insight to the playbook.
|
||||||
|
|
||||||
|
Categories:
|
||||||
|
- str: Strategy - Optimization strategies that work
|
||||||
|
- mis: Mistake - Common mistakes to avoid
|
||||||
|
- tool: Tool - Tool usage patterns
|
||||||
|
- cal: Calculation - Formulas and calculations
|
||||||
|
- dom: Domain - Domain-specific knowledge (FEA, NX)
|
||||||
|
- wf: Workflow - Workflow patterns
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from optimization_engine.context.playbook import InsightCategory
|
||||||
|
except ImportError as e:
|
||||||
|
raise HTTPException(500, f"Context module not available: {e}")
|
||||||
|
|
||||||
|
# Validate category
|
||||||
|
try:
|
||||||
|
category = InsightCategory(request.category)
|
||||||
|
except ValueError:
|
||||||
|
raise HTTPException(400, f"Invalid category: {request.category}")
|
||||||
|
|
||||||
|
playbook = get_playbook()
|
||||||
|
|
||||||
|
item = playbook.add_insight(
|
||||||
|
category=category,
|
||||||
|
content=request.content,
|
||||||
|
source_trial=request.source_trial,
|
||||||
|
tags=request.tags
|
||||||
|
)
|
||||||
|
|
||||||
|
playbook.save(PLAYBOOK_PATH)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"item_id": item.id,
|
||||||
|
"category": item.category.value,
|
||||||
|
"content": item.content,
|
||||||
|
"message": "Insight added successfully"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.delete("/playbook/items/{item_id}")
|
||||||
|
async def delete_playbook_item(item_id: str):
|
||||||
|
"""Delete a playbook item."""
|
||||||
|
playbook = get_playbook()
|
||||||
|
|
||||||
|
if item_id not in playbook.items:
|
||||||
|
raise HTTPException(404, f"Item not found: {item_id}")
|
||||||
|
|
||||||
|
content = playbook.items[item_id].content[:50]
|
||||||
|
del playbook.items[item_id]
|
||||||
|
playbook.save(PLAYBOOK_PATH)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"deleted": item_id,
|
||||||
|
"content_preview": content
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/playbook/prune")
|
||||||
|
async def prune_playbook(threshold: int = Query(-3, description="Net score threshold for pruning")):
|
||||||
|
"""
|
||||||
|
Prune harmful items from the playbook.
|
||||||
|
|
||||||
|
Items with net_score <= threshold will be removed.
|
||||||
|
"""
|
||||||
|
playbook = get_playbook()
|
||||||
|
|
||||||
|
removed_count = playbook.prune_harmful(threshold=threshold)
|
||||||
|
playbook.save(PLAYBOOK_PATH)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"items_pruned": removed_count,
|
||||||
|
"threshold_used": threshold,
|
||||||
|
"remaining_items": len(playbook.items)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/playbook/context")
|
||||||
|
async def get_playbook_context(
|
||||||
|
task_type: str = Query("optimization", description="Task type for context filtering"),
|
||||||
|
max_items: int = Query(15, description="Maximum items to include"),
|
||||||
|
min_confidence: float = Query(0.5, description="Minimum confidence threshold")
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Get playbook context string formatted for LLM consumption.
|
||||||
|
|
||||||
|
This is what gets injected into the LLM context window.
|
||||||
|
"""
|
||||||
|
playbook = get_playbook()
|
||||||
|
|
||||||
|
context = playbook.get_context_for_task(
|
||||||
|
task_type=task_type,
|
||||||
|
max_items=max_items,
|
||||||
|
min_confidence=min_confidence
|
||||||
|
)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"context": context,
|
||||||
|
"items_included": min(max_items, len(playbook.items)),
|
||||||
|
"task_type": task_type
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Session state endpoints
|
||||||
|
@router.get("/session", response_model=SessionStateResponse)
|
||||||
|
async def get_session_state():
|
||||||
|
"""Get current session state."""
|
||||||
|
try:
|
||||||
|
from optimization_engine.context.session_state import get_session
|
||||||
|
session = get_session()
|
||||||
|
|
||||||
|
return SessionStateResponse(
|
||||||
|
session_id=session.session_id,
|
||||||
|
task_type=session.exposed.task_type.value if session.exposed.task_type else None,
|
||||||
|
study_name=session.exposed.study_name,
|
||||||
|
study_status=session.exposed.study_status,
|
||||||
|
trials_completed=session.exposed.trials_completed,
|
||||||
|
trials_total=session.exposed.trials_total,
|
||||||
|
best_value=session.exposed.best_value,
|
||||||
|
recent_actions=session.exposed.recent_actions[-10:],
|
||||||
|
recent_errors=session.exposed.recent_errors[-5:]
|
||||||
|
)
|
||||||
|
except ImportError:
|
||||||
|
raise HTTPException(500, "Session state module not available")
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/session/context")
|
||||||
|
async def get_session_context():
|
||||||
|
"""Get session context string for LLM consumption."""
|
||||||
|
try:
|
||||||
|
from optimization_engine.context.session_state import get_session
|
||||||
|
session = get_session()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"context": session.get_llm_context(),
|
||||||
|
"session_id": session.session_id,
|
||||||
|
"last_updated": session.last_updated
|
||||||
|
}
|
||||||
|
except ImportError:
|
||||||
|
raise HTTPException(500, "Session state module not available")
|
||||||
|
|
||||||
|
|
||||||
|
# Cache monitoring endpoints
|
||||||
|
@router.get("/cache/stats")
|
||||||
|
async def get_cache_stats():
|
||||||
|
"""Get KV-cache efficiency statistics."""
|
||||||
|
try:
|
||||||
|
from optimization_engine.context.cache_monitor import get_cache_optimizer
|
||||||
|
optimizer = get_cache_optimizer()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"stats": optimizer.get_stats_dict(),
|
||||||
|
"report": optimizer.get_report()
|
||||||
|
}
|
||||||
|
except ImportError:
|
||||||
|
return {
|
||||||
|
"message": "Cache monitoring not active",
|
||||||
|
"stats": None
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Learning report endpoints
|
||||||
|
@router.get("/learning/report")
|
||||||
|
async def get_learning_report():
|
||||||
|
"""Get a comprehensive learning report."""
|
||||||
|
playbook = get_playbook()
|
||||||
|
stats = playbook.get_stats()
|
||||||
|
|
||||||
|
# Get top and worst performers
|
||||||
|
items = list(playbook.items.values())
|
||||||
|
items.sort(key=lambda x: x.net_score, reverse=True)
|
||||||
|
|
||||||
|
top_performers = [
|
||||||
|
{"id": i.id, "content": i.content[:100], "score": i.net_score}
|
||||||
|
for i in items[:10]
|
||||||
|
]
|
||||||
|
|
||||||
|
items.sort(key=lambda x: x.net_score)
|
||||||
|
worst_performers = [
|
||||||
|
{"id": i.id, "content": i.content[:100], "score": i.net_score}
|
||||||
|
for i in items[:5] if i.net_score < 0
|
||||||
|
]
|
||||||
|
|
||||||
|
return {
|
||||||
|
"generated_at": datetime.now().isoformat(),
|
||||||
|
"playbook_stats": stats,
|
||||||
|
"top_performers": top_performers,
|
||||||
|
"worst_performers": worst_performers,
|
||||||
|
"recommendations": _generate_recommendations(playbook)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _generate_recommendations(playbook) -> List[str]:
|
||||||
|
"""Generate recommendations based on playbook state."""
|
||||||
|
recommendations = []
|
||||||
|
|
||||||
|
# Check for harmful items
|
||||||
|
harmful = [i for i in playbook.items.values() if i.net_score < -3]
|
||||||
|
if harmful:
|
||||||
|
recommendations.append(
|
||||||
|
f"Consider pruning {len(harmful)} harmful items (net_score < -3)"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check for untested items
|
||||||
|
untested = [
|
||||||
|
i for i in playbook.items.values()
|
||||||
|
if i.helpful_count + i.harmful_count == 0
|
||||||
|
]
|
||||||
|
if len(untested) > 10:
|
||||||
|
recommendations.append(
|
||||||
|
f"{len(untested)} items have no feedback - consider testing them"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check category balance
|
||||||
|
stats = playbook.get_stats()
|
||||||
|
if stats["by_category"].get("MISTAKE", 0) < 5:
|
||||||
|
recommendations.append(
|
||||||
|
"Low mistake count - actively record errors when they occur"
|
||||||
|
)
|
||||||
|
|
||||||
|
if not recommendations:
|
||||||
|
recommendations.append("Playbook is in good health!")
|
||||||
|
|
||||||
|
return recommendations
|
||||||
@@ -963,7 +963,7 @@ async def convert_study_mesh(study_id: str):
|
|||||||
|
|
||||||
# Import mesh converter
|
# Import mesh converter
|
||||||
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
|
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
|
||||||
from optimization_engine.mesh_converter import convert_study_mesh
|
from optimization_engine.nx.mesh_converter import convert_study_mesh
|
||||||
|
|
||||||
# Convert mesh
|
# Convert mesh
|
||||||
output_path = convert_study_mesh(study_dir)
|
output_path = convert_study_mesh(study_dir)
|
||||||
|
|||||||
@@ -34,8 +34,8 @@ from typing import Optional
|
|||||||
PROJECT_ROOT = Path(__file__).parent
|
PROJECT_ROOT = Path(__file__).parent
|
||||||
sys.path.insert(0, str(PROJECT_ROOT))
|
sys.path.insert(0, str(PROJECT_ROOT))
|
||||||
|
|
||||||
from optimization_engine.auto_trainer import AutoTrainer, check_training_status
|
from optimization_engine.processors.surrogates.auto_trainer import AutoTrainer, check_training_status
|
||||||
from optimization_engine.template_loader import (
|
from optimization_engine.config.template_loader import (
|
||||||
create_study_from_template,
|
create_study_from_template,
|
||||||
list_templates,
|
list_templates,
|
||||||
get_template
|
get_template
|
||||||
|
|||||||
@@ -55,7 +55,7 @@ def setup_python_path():
|
|||||||
"""
|
"""
|
||||||
Add Atomizer root to Python path if not already present.
|
Add Atomizer root to Python path if not already present.
|
||||||
|
|
||||||
This allows imports like `from optimization_engine.runner import ...`
|
This allows imports like `from optimization_engine.core.runner import ...`
|
||||||
to work from anywhere in the project.
|
to work from anywhere in the project.
|
||||||
"""
|
"""
|
||||||
root = get_atomizer_root()
|
root = get_atomizer_root()
|
||||||
@@ -124,7 +124,7 @@ def ensure_imports():
|
|||||||
atomizer_paths.ensure_imports()
|
atomizer_paths.ensure_imports()
|
||||||
|
|
||||||
# Now you can import Atomizer modules
|
# Now you can import Atomizer modules
|
||||||
from optimization_engine.runner import OptimizationRunner
|
from optimization_engine.core.runner import OptimizationRunner
|
||||||
```
|
```
|
||||||
"""
|
"""
|
||||||
setup_python_path()
|
setup_python_path()
|
||||||
|
|||||||
931
docs/ATOMIZER_PODCAST_BRIEFING.md
Normal file
931
docs/ATOMIZER_PODCAST_BRIEFING.md
Normal file
@@ -0,0 +1,931 @@
|
|||||||
|
# Atomizer: Intelligent FEA Optimization & NX Configuration Framework
|
||||||
|
## Complete Technical Briefing Document for Podcast Generation
|
||||||
|
|
||||||
|
**Document Version:** 2.2
|
||||||
|
**Generated:** January 2, 2026
|
||||||
|
**Purpose:** NotebookLM/AI Podcast Source Material
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 1: PROJECT OVERVIEW & PHILOSOPHY
|
||||||
|
|
||||||
|
## What is Atomizer?
|
||||||
|
|
||||||
|
Atomizer is an **intelligent optimization engine and NX configurator** designed to bridge the gap between state-of-the-art simulation methods and performant, production-ready FEA workflows. It's not about CAD manipulation or mesh generation - those are setup concerns. Atomizer focuses on what matters: **making advanced simulation methods accessible and effective**.
|
||||||
|
|
||||||
|
### The Core Problem We Solve
|
||||||
|
|
||||||
|
State-of-the-art optimization algorithms exist in academic papers. Performant FEA simulations exist in commercial tools like NX Nastran. But bridging these two worlds requires:
|
||||||
|
- Deep knowledge of optimization theory (TPE, CMA-ES, Bayesian methods)
|
||||||
|
- Understanding of simulation physics and solver behavior
|
||||||
|
- Experience with what works for different problem types
|
||||||
|
- Infrastructure for running hundreds of automated trials
|
||||||
|
|
||||||
|
Most engineers don't have time to become experts in all these domains. **Atomizer is that bridge.**
|
||||||
|
|
||||||
|
### The Core Philosophy: "Optimize Smarter, Not Harder"
|
||||||
|
|
||||||
|
Traditional structural optimization is painful because:
|
||||||
|
- Engineers pick algorithms without knowing which is best for their problem
|
||||||
|
- Every new study starts from scratch - no accumulated knowledge
|
||||||
|
- Commercial tools offer generic methods, not physics-appropriate ones
|
||||||
|
- Simulation expertise and optimization expertise rarely coexist
|
||||||
|
|
||||||
|
Atomizer solves this by:
|
||||||
|
1. **Characterizing each study** to understand its optimization landscape
|
||||||
|
2. **Selecting methods automatically** based on problem characteristics
|
||||||
|
3. **Learning from every study** what works and what doesn't
|
||||||
|
4. **Building a knowledge base** of parameter-performance relationships
|
||||||
|
|
||||||
|
### What Atomizer Is NOT
|
||||||
|
|
||||||
|
- It's not a CAD tool - geometry modeling happens in NX
|
||||||
|
- It's not a mesh generator - meshing is handled by NX Pre/Post
|
||||||
|
- It's not replacing the engineer's judgment - it's amplifying it
|
||||||
|
- It's not a black box - every decision is traceable and explainable
|
||||||
|
|
||||||
|
### Target Audience
|
||||||
|
|
||||||
|
- **FEA Engineers** who want to run serious optimization campaigns
|
||||||
|
- **Simulation specialists** tired of manual trial-and-error
|
||||||
|
- **Research teams** exploring design spaces systematically
|
||||||
|
- **Anyone** who needs to find optimal designs faster
|
||||||
|
|
||||||
|
### Key Differentiators from Commercial Tools
|
||||||
|
|
||||||
|
| Feature | OptiStruct/HEEDS | optiSLang | Atomizer |
|
||||||
|
|---------|------------------|-----------|----------|
|
||||||
|
| Algorithm selection | Manual | Manual | **Automatic (IMSO)** |
|
||||||
|
| Learning from history | None | None | **LAC persistent memory** |
|
||||||
|
| Study characterization | Basic | Basic | **Full landscape analysis** |
|
||||||
|
| Neural acceleration | Limited | Basic | **GNN + MLP + Gradient** |
|
||||||
|
| Protocol validation | None | None | **Research → Review → Approve** |
|
||||||
|
| Documentation source | Static manuals | Static manuals | **MCP-first, live lookups** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 2: STUDY CHARACTERIZATION & PERFORMANCE LEARNING
|
||||||
|
|
||||||
|
## The Heart of Atomizer: Understanding What Works
|
||||||
|
|
||||||
|
The most valuable thing Atomizer does is **learn what makes studies succeed**. This isn't just recording results - it's building a deep understanding of the relationship between:
|
||||||
|
|
||||||
|
- **Study parameters** (geometry type, design variable count, constraint complexity)
|
||||||
|
- **Optimization methods** (which algorithm, what settings)
|
||||||
|
- **Performance outcomes** (convergence speed, solution quality, feasibility rate)
|
||||||
|
|
||||||
|
### Study Characterization Process
|
||||||
|
|
||||||
|
When Atomizer runs an optimization, it doesn't just optimize - it **characterizes**:
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ STUDY CHARACTERIZATION │
|
||||||
|
├─────────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ PROBLEM FINGERPRINT: │
|
||||||
|
│ • Geometry type (bracket, beam, mirror, shell, assembly) │
|
||||||
|
│ • Number of design variables (1-5, 6-10, 11+) │
|
||||||
|
│ • Objective physics (stress, frequency, displacement, WFE) │
|
||||||
|
│ • Constraint types (upper/lower bounds, ratios) │
|
||||||
|
│ • Solver type (SOL 101, 103, 105, 111, 112) │
|
||||||
|
│ │
|
||||||
|
│ LANDSCAPE METRICS (computed during characterization phase): │
|
||||||
|
│ • Smoothness score (0-1): How continuous is the response? │
|
||||||
|
│ • Multimodality: How many distinct good regions exist? │
|
||||||
|
│ • Parameter correlations: Which variables matter most? │
|
||||||
|
│ • Noise level: How much solver variation exists? │
|
||||||
|
│ • Dimensionality impact: How does space grow with variables? │
|
||||||
|
│ │
|
||||||
|
│ PERFORMANCE OUTCOME: │
|
||||||
|
│ • Trials to convergence │
|
||||||
|
│ • Best objective achieved │
|
||||||
|
│ • Constraint satisfaction rate │
|
||||||
|
│ • Algorithm that won (if IMSO used) │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Learning What Works: The LAC System
|
||||||
|
|
||||||
|
LAC (Learning Atomizer Core) stores the relationship between study characteristics and outcomes:
|
||||||
|
|
||||||
|
```
|
||||||
|
knowledge_base/lac/
|
||||||
|
├── optimization_memory/ # Performance by geometry type
|
||||||
|
│ ├── bracket.jsonl # "For brackets with 4-6 vars, TPE converges in ~60 trials"
|
||||||
|
│ ├── beam.jsonl # "Beam frequency problems are smooth - CMA-ES works well"
|
||||||
|
│ └── mirror.jsonl # "Zernike objectives need GP-BO for sample efficiency"
|
||||||
|
├── session_insights/
|
||||||
|
│ ├── success_pattern.jsonl # What configurations led to fast convergence
|
||||||
|
│ ├── failure.jsonl # What configurations failed and why
|
||||||
|
│ └── workaround.jsonl # Fixes for common issues
|
||||||
|
└── method_performance/
|
||||||
|
└── algorithm_selection.jsonl # Which algorithm won for which problem type
|
||||||
|
```
|
||||||
|
|
||||||
|
### Querying Historical Performance
|
||||||
|
|
||||||
|
Before starting a new study, Atomizer queries LAC:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# What worked for similar problems?
|
||||||
|
similar_studies = lac.query_similar_optimizations(
|
||||||
|
geometry_type="bracket",
|
||||||
|
n_objectives=2,
|
||||||
|
n_design_vars=5,
|
||||||
|
physics=["stress", "mass"]
|
||||||
|
)
|
||||||
|
|
||||||
|
# Result: "For 2-objective bracket problems with 5 vars,
|
||||||
|
# NSGA-II with 80 trials typically finds a good Pareto front.
|
||||||
|
# GP-BO is overkill - the landscape is usually rugged."
|
||||||
|
|
||||||
|
# Get the recommended method
|
||||||
|
recommendation = lac.get_best_method_for(
|
||||||
|
geometry_type="bracket",
|
||||||
|
n_objectives=2,
|
||||||
|
constraint_types=["upper_bound"]
|
||||||
|
)
|
||||||
|
# Result: {"method": "NSGA-II", "n_trials": 80, "confidence": 0.87}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Why This Matters
|
||||||
|
|
||||||
|
Commercial tools treat every optimization as if it's the first one ever run. **Atomizer treats every optimization as an opportunity to learn.**
|
||||||
|
|
||||||
|
After 100 studies:
|
||||||
|
- Atomizer knows that mirror problems need sample-efficient methods
|
||||||
|
- Atomizer knows that bracket stress problems are often rugged
|
||||||
|
- Atomizer knows that frequency optimization is usually smooth
|
||||||
|
- Atomizer knows which constraint formulations cause infeasibility
|
||||||
|
|
||||||
|
This isn't AI magic - it's **structured knowledge accumulation** that makes every future study faster and more reliable.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 3: THE PROTOCOL OPERATING SYSTEM
|
||||||
|
|
||||||
|
## Structured, Traceable Operations
|
||||||
|
|
||||||
|
Atomizer operates through a 4-layer protocol system that ensures every action is:
|
||||||
|
- **Documented** - what should happen is written down
|
||||||
|
- **Traceable** - what actually happened is logged
|
||||||
|
- **Validated** - outcomes are checked against expectations
|
||||||
|
- **Improvable** - protocols can be updated based on experience
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Layer 0: BOOTSTRAP │
|
||||||
|
│ Purpose: Task routing, session initialization │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Layer 1: OPERATIONS (OP_01 - OP_07) │
|
||||||
|
│ Create Study | Run Optimization | Monitor | Analyze | Export │
|
||||||
|
│ Troubleshoot | Disk Optimization │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Layer 2: SYSTEM (SYS_10 - SYS_17) │
|
||||||
|
│ IMSO | Multi-objective | Extractors | Dashboard │
|
||||||
|
│ Neural Acceleration | Method Selector | Study Insights │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Layer 3: EXTENSIONS (EXT_01 - EXT_04) │
|
||||||
|
│ Create Extractor | Create Hook | Create Protocol | Create Skill │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Protocol Evolution: Research → Review → Approve
|
||||||
|
|
||||||
|
**What happens when no protocol exists for your use case?**
|
||||||
|
|
||||||
|
This is where Atomizer's extensibility shines. The system has a structured workflow for adding new capabilities:
|
||||||
|
|
||||||
|
### The Protocol Evolution Workflow
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ STEP 1: IDENTIFY GAP │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ User: "I need to extract buckling load factors" │
|
||||||
|
│ Atomizer: "No existing extractor for buckling. Initiating │
|
||||||
|
│ new capability development." │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ STEP 2: RESEARCH PHASE │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ 1. Query MCP Siemens docs: "How does NX store buckling?" │
|
||||||
|
│ 2. Check pyNastran docs: "OP2 buckling result format" │
|
||||||
|
│ 3. Search NX Open TSE: Example journals for SOL 105 │
|
||||||
|
│ 4. Draft extractor implementation │
|
||||||
|
│ 5. Create test cases │
|
||||||
|
│ │
|
||||||
|
│ Output: Draft protocol + implementation + tests │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ STEP 3: PUSH TO APPROVAL BUCKET │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ Location: docs/protocols/pending/ │
|
||||||
|
│ │
|
||||||
|
│ Contents: │
|
||||||
|
│ • Protocol document (EXT_XX_BUCKLING_EXTRACTOR.md) │
|
||||||
|
│ • Implementation (extract_buckling.py) │
|
||||||
|
│ • Test suite (test_buckling_extractor.py) │
|
||||||
|
│ • Validation evidence (example outputs) │
|
||||||
|
│ │
|
||||||
|
│ Status: PENDING_REVIEW │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ STEP 4: PRIVILEGED REVIEW │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ Reviewer with "power_user" or "admin" privilege: │
|
||||||
|
│ │
|
||||||
|
│ Checks: │
|
||||||
|
│ ☐ Implementation follows extractor patterns │
|
||||||
|
│ ☐ Tests pass on multiple SOL 105 models │
|
||||||
|
│ ☐ Documentation is complete │
|
||||||
|
│ ☐ Error handling is robust │
|
||||||
|
│ ☐ No security concerns │
|
||||||
|
│ │
|
||||||
|
│ Decision: APPROVE / REQUEST_CHANGES / REJECT │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ STEP 5: INTEGRATION │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ On APPROVE: │
|
||||||
|
│ • Move to docs/protocols/system/ │
|
||||||
|
│ • Add to optimization_engine/extractors/__init__.py │
|
||||||
|
│ • Update SYS_12_EXTRACTOR_LIBRARY.md │
|
||||||
|
│ • Update .claude/skills/01_CHEATSHEET.md │
|
||||||
|
│ • Commit with: "feat: Add E23 buckling extractor" │
|
||||||
|
│ │
|
||||||
|
│ Status: ACTIVE - Now part of Atomizer ecosystem │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Privilege Levels
|
||||||
|
|
||||||
|
| Level | Can Do | Cannot Do |
|
||||||
|
|-------|--------|-----------|
|
||||||
|
| **user** | Use all OP_* protocols | Create/modify protocols |
|
||||||
|
| **power_user** | Use OP_* + EXT_01, EXT_02 | Approve new system protocols |
|
||||||
|
| **admin** | Everything | - |
|
||||||
|
|
||||||
|
This ensures:
|
||||||
|
- Anyone can propose new capabilities
|
||||||
|
- Only validated code enters the ecosystem
|
||||||
|
- Quality standards are maintained
|
||||||
|
- The system grows safely over time
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 4: STUDY INTERVIEW MODE - INTELLIGENT STUDY CREATION
|
||||||
|
|
||||||
|
## The Problem: Configuration Complexity
|
||||||
|
|
||||||
|
Creating an optimization study traditionally requires:
|
||||||
|
- Understanding optimization_config.json schema
|
||||||
|
- Knowing which extractor (E1-E24) maps to which physics
|
||||||
|
- Setting appropriate bounds for design variables
|
||||||
|
- Choosing the right sampler and trial count
|
||||||
|
- Avoiding common anti-patterns (mass optimization without constraints)
|
||||||
|
|
||||||
|
**Most engineers aren't optimization experts.** They know their physics, not Optuna samplers.
|
||||||
|
|
||||||
|
## The Solution: Guided Interview
|
||||||
|
|
||||||
|
Instead of asking users to fill out JSON files, Atomizer now **interviews them through natural conversation**.
|
||||||
|
|
||||||
|
### How It Works
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ STUDY INTERVIEW MODE (DEFAULT for all study creation) │
|
||||||
|
├─────────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ User: "I want to create a study for my bracket" │
|
||||||
|
│ │
|
||||||
|
│ Atomizer: "I'll help you set up your optimization study. │
|
||||||
|
│ Let me ask a few questions..." │
|
||||||
|
│ │
|
||||||
|
│ ┌──────────────────────────────────────────────────────────┐ │
|
||||||
|
│ │ PHASE 1: INTROSPECTION (automatic) │ │
|
||||||
|
│ │ • Analyze NX model expressions │ │
|
||||||
|
│ │ • Detect materials from simulation │ │
|
||||||
|
│ │ • Identify candidate design variables │ │
|
||||||
|
│ └──────────────────────────────────────────────────────────┘ │
|
||||||
|
│ │
|
||||||
|
│ ┌──────────────────────────────────────────────────────────┐ │
|
||||||
|
│ │ PHASE 2: PROBLEM DEFINITION │ │
|
||||||
|
│ │ Q: "What are you trying to optimize?" │ │
|
||||||
|
│ │ A: "Minimize mass while keeping stress low" │ │
|
||||||
|
│ └──────────────────────────────────────────────────────────┘ │
|
||||||
|
│ │
|
||||||
|
│ ┌──────────────────────────────────────────────────────────┐ │
|
||||||
|
│ │ PHASE 3: OBJECTIVES (auto-mapped to extractors) │ │
|
||||||
|
│ │ • Mass → E4 (BDF mass extractor) │ │
|
||||||
|
│ │ • Stress → E3 (Von Mises stress) │ │
|
||||||
|
│ │ • No manual extractor selection needed! │ │
|
||||||
|
│ └──────────────────────────────────────────────────────────┘ │
|
||||||
|
│ │
|
||||||
|
│ ┌──────────────────────────────────────────────────────────┐ │
|
||||||
|
│ │ PHASE 4: CONSTRAINTS (material-aware validation) │ │
|
||||||
|
│ │ Q: "What's the maximum stress limit?" │ │
|
||||||
|
│ │ A: "200 MPa" │ │
|
||||||
|
│ │ │ │
|
||||||
|
│ │ ⚠️ "Your model uses Aluminum 6061-T6 (yield: 276 MPa). │ │
|
||||||
|
│ │ 200 MPa is close to yield. Consider 184 MPa (SF=1.5)"│ │
|
||||||
|
│ └──────────────────────────────────────────────────────────┘ │
|
||||||
|
│ │
|
||||||
|
│ ┌──────────────────────────────────────────────────────────┐ │
|
||||||
|
│ │ PHASE 5: DESIGN VARIABLES (from introspection) │ │
|
||||||
|
│ │ "I found these expressions in your model: │ │
|
||||||
|
│ │ • thickness (current: 5mm) │ │
|
||||||
|
│ │ • rib_height (current: 10mm) │ │
|
||||||
|
│ │ Which should we optimize?" │ │
|
||||||
|
│ │ │ │
|
||||||
|
│ │ → Auto-suggests bounds: 2.5-7.5mm (±50% of current) │ │
|
||||||
|
│ └──────────────────────────────────────────────────────────┘ │
|
||||||
|
│ │
|
||||||
|
│ ┌──────────────────────────────────────────────────────────┐ │
|
||||||
|
│ │ PHASE 6: REVIEW & GENERATE │ │
|
||||||
|
│ │ Shows complete blueprint, asks for confirmation │ │
|
||||||
|
│ │ → Generates optimization_config.json │ │
|
||||||
|
│ │ → Generates run_optimization.py │ │
|
||||||
|
│ └──────────────────────────────────────────────────────────┘ │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Anti-Pattern Detection
|
||||||
|
|
||||||
|
The interview includes an **Engineering Validator** that catches common mistakes:
|
||||||
|
|
||||||
|
| Anti-Pattern | Detection | Warning |
|
||||||
|
|--------------|-----------|---------|
|
||||||
|
| `mass_no_constraint` | Mass objective without stress/displacement limit | "This typically produces paper-thin designs" |
|
||||||
|
| `stress_over_yield` | Stress limit > material yield | "Consider safety factor 1.5-2.0" |
|
||||||
|
| `bounds_too_wide` | Variable range > 100x | "Wide bounds = slow convergence" |
|
||||||
|
| `too_many_objectives` | >3 objectives | "Focus on key goals for tractable optimization" |
|
||||||
|
|
||||||
|
### Materials Database
|
||||||
|
|
||||||
|
Built-in knowledge of engineering materials:
|
||||||
|
- **12 common materials** (aluminum, steel, titanium, composites)
|
||||||
|
- **Fuzzy name matching**: "Al 6061", "6061-T6", "aluminum" → all work
|
||||||
|
- **Safety factors** by application (static, fatigue, impact)
|
||||||
|
- **Yield/ultimate stress** validation
|
||||||
|
|
||||||
|
### Key Benefits
|
||||||
|
|
||||||
|
1. **Zero configuration knowledge needed** - Just describe what you want
|
||||||
|
2. **Material-aware validation** - Catches stress limits vs. yield
|
||||||
|
3. **Auto extractor mapping** - Goals → E1-E24 automatically
|
||||||
|
4. **Anti-pattern detection** - Warns about common mistakes
|
||||||
|
5. **State persistence** - Resume interrupted interviews
|
||||||
|
6. **Blueprint validation** - Complete config before generation
|
||||||
|
|
||||||
|
### Trigger Phrases
|
||||||
|
|
||||||
|
Any of these start Interview Mode (now the DEFAULT):
|
||||||
|
- "Create a study", "new study", "set up study"
|
||||||
|
- "Optimize this", "optimize my model"
|
||||||
|
- "I want to minimize mass"
|
||||||
|
|
||||||
|
To skip Interview Mode (power users only):
|
||||||
|
- "Quick setup", "skip interview", "manual config"
|
||||||
|
|
||||||
|
### Technical Implementation
|
||||||
|
|
||||||
|
```
|
||||||
|
optimization_engine/interview/
|
||||||
|
├── study_interview.py # Main orchestrator (StudyInterviewEngine)
|
||||||
|
├── question_engine.py # Conditional logic, dynamic options
|
||||||
|
├── interview_state.py # Persistent state, JSON serialization
|
||||||
|
├── interview_presenter.py # ClaudePresenter, DashboardPresenter
|
||||||
|
├── engineering_validator.py # Materials DB, anti-pattern detector
|
||||||
|
├── study_blueprint.py # Validated configuration generation
|
||||||
|
└── schemas/
|
||||||
|
├── interview_questions.json # 17 questions, 7 phases
|
||||||
|
├── materials_database.json # 12 materials with properties
|
||||||
|
└── anti_patterns.json # 12 anti-pattern definitions
|
||||||
|
```
|
||||||
|
|
||||||
|
**All 129 tests passing.**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 5: MCP-FIRST DEVELOPMENT APPROACH
|
||||||
|
|
||||||
|
## When Functions Don't Exist: How Atomizer Develops New Capabilities
|
||||||
|
|
||||||
|
When Atomizer encounters a task without an existing extractor or protocol, it follows a **documentation-first development approach** using MCP (Model Context Protocol) tools.
|
||||||
|
|
||||||
|
### The Documentation Hierarchy
|
||||||
|
|
||||||
|
```
|
||||||
|
PRIMARY SOURCE (Always check first):
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ MCP Siemens Documentation Tools │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ • mcp__siemens-docs__nxopen_get_class │
|
||||||
|
│ → Get official NX Open class documentation │
|
||||||
|
│ → Example: Query "CaeResultType" for result access patterns │
|
||||||
|
│ │
|
||||||
|
│ • mcp__siemens-docs__nxopen_get_index │
|
||||||
|
│ → Browse class/function indexes │
|
||||||
|
│ → Find related classes for a capability │
|
||||||
|
│ │
|
||||||
|
│ • mcp__siemens-docs__siemens_docs_list │
|
||||||
|
│ → List all available documentation resources │
|
||||||
|
│ │
|
||||||
|
│ WHY PRIMARY: This is the official, up-to-date source. │
|
||||||
|
│ API calls verified against actual NX Open signatures. │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
SECONDARY SOURCES (Use when MCP doesn't have the answer):
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ pyNastran Documentation │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ For OP2/F06 result parsing patterns │
|
||||||
|
│ Example: How to access buckling eigenvalues from OP2 │
|
||||||
|
│ Location: pyNastran GitHub, readthedocs │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ NX Open TSE (Technical Support Examples) │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ Community examples and Siemens support articles │
|
||||||
|
│ Example: Working journal for exporting specific result types │
|
||||||
|
│ Location: Siemens Community, support articles │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Existing Atomizer Extractors │
|
||||||
|
│ ───────────────────────────────────────────────────────────── │
|
||||||
|
│ Pattern reference from similar implementations │
|
||||||
|
│ Example: How extract_frequency.py handles modal results │
|
||||||
|
│ Location: optimization_engine/extractors/ │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: Developing a New Extractor
|
||||||
|
|
||||||
|
User request: "I need to extract heat flux from thermal analysis results"
|
||||||
|
|
||||||
|
**Step 1: Query MCP First**
|
||||||
|
```python
|
||||||
|
# Query NX Open documentation
|
||||||
|
mcp__siemens-docs__nxopen_get_class("CaeResultComponent")
|
||||||
|
# Returns: Official documentation for result component access
|
||||||
|
|
||||||
|
mcp__siemens-docs__nxopen_get_class("HeatFluxComponent")
|
||||||
|
# Returns: Specific heat flux result access patterns
|
||||||
|
```
|
||||||
|
|
||||||
|
**Step 2: Check pyNastran for OP2 Parsing**
|
||||||
|
```python
|
||||||
|
# How does pyNastran represent thermal results?
|
||||||
|
# Check: model.thermalFlux or model.heatFlux structures
|
||||||
|
```
|
||||||
|
|
||||||
|
**Step 3: Reference Existing Extractors**
|
||||||
|
```python
|
||||||
|
# Look at extract_temperature.py for thermal result patterns
|
||||||
|
# Adapt the OP2 access pattern for heat flux
|
||||||
|
```
|
||||||
|
|
||||||
|
**Step 4: Implement with Verified API Calls**
|
||||||
|
```python
|
||||||
|
def extract_heat_flux(op2_file: Path, subcase: int = 1) -> Dict:
|
||||||
|
"""
|
||||||
|
Extract heat flux from SOL 153/159 thermal results.
|
||||||
|
|
||||||
|
API Reference: NX Open CaeResultComponent (via MCP)
|
||||||
|
OP2 Format: pyNastran thermal flux structures
|
||||||
|
"""
|
||||||
|
# Implementation using verified patterns
|
||||||
|
```
|
||||||
|
|
||||||
|
### Why This Matters
|
||||||
|
|
||||||
|
- **No guessing** - Every API call is verified against documentation
|
||||||
|
- **Maintainable** - When NX updates, we check official docs first
|
||||||
|
- **Traceable** - Each extractor documents its sources
|
||||||
|
- **Reliable** - Secondary sources only fill gaps, never override primary
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 6: SIMULATION-FOCUSED OPTIMIZATION
|
||||||
|
|
||||||
|
## Bridging State-of-the-Art Methods and Performant Simulations
|
||||||
|
|
||||||
|
Atomizer's core mission is making advanced optimization methods work seamlessly with NX Nastran simulations. The CAD and mesh are setup concerns - **our focus is on the simulation loop.**
|
||||||
|
|
||||||
|
### The Simulation Optimization Loop
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ SIMULATION-CENTRIC WORKFLOW │
|
||||||
|
├─────────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ ┌─────────────┐ │
|
||||||
|
│ │ OPTIMIZER │ ← State-of-the-art algorithms │
|
||||||
|
│ │ (Atomizer) │ TPE, CMA-ES, GP-BO, NSGA-II │
|
||||||
|
│ └──────┬──────┘ + Neural surrogates │
|
||||||
|
│ │ │
|
||||||
|
│ ▼ Design Variables │
|
||||||
|
│ ┌─────────────┐ │
|
||||||
|
│ │ NX CONFIG │ ← Expression updates via .exp files │
|
||||||
|
│ │ UPDATER │ Automated, no GUI interaction │
|
||||||
|
│ └──────┬──────┘ │
|
||||||
|
│ │ │
|
||||||
|
│ ▼ Updated Model │
|
||||||
|
│ ┌─────────────┐ │
|
||||||
|
│ │ NX NASTRAN │ ← SOL 101, 103, 105, 111, 112 │
|
||||||
|
│ │ SOLVER │ Batch mode execution │
|
||||||
|
│ └──────┬──────┘ │
|
||||||
|
│ │ │
|
||||||
|
│ ▼ Results (OP2, F06) │
|
||||||
|
│ ┌─────────────┐ │
|
||||||
|
│ │ EXTRACTORS │ ← 24 physics extractors │
|
||||||
|
│ │ (pyNastran) │ Stress, displacement, frequency, etc. │
|
||||||
|
│ └──────┬──────┘ │
|
||||||
|
│ │ │
|
||||||
|
│ ▼ Objectives & Constraints │
|
||||||
|
│ ┌─────────────┐ │
|
||||||
|
│ │ OPTIMIZER │ ← Learning: What parameters → What results │
|
||||||
|
│ │ (Atomizer) │ Building surrogate models │
|
||||||
|
│ └─────────────┘ │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Supported Nastran Solution Types
|
||||||
|
|
||||||
|
| SOL | Type | What Atomizer Optimizes |
|
||||||
|
|-----|------|-------------------------|
|
||||||
|
| 101 | Linear Static | Stress, displacement, stiffness |
|
||||||
|
| 103 | Normal Modes | Frequencies, mode shapes, modal mass |
|
||||||
|
| 105 | Buckling | Critical load factors, stability margins |
|
||||||
|
| 111 | Frequency Response | Transfer functions, resonance peaks |
|
||||||
|
| 112 | Transient Response | Peak dynamic response, settling time |
|
||||||
|
|
||||||
|
### NX Expression Management
|
||||||
|
|
||||||
|
Atomizer updates NX models through the expression system - no manual CAD editing:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Expression file format (.exp)
|
||||||
|
[MilliMeter]rib_thickness=12.5
|
||||||
|
[MilliMeter]flange_width=25.0
|
||||||
|
[Degrees]support_angle=45.0
|
||||||
|
|
||||||
|
# Atomizer generates this, NX imports it, geometry updates automatically
|
||||||
|
```
|
||||||
|
|
||||||
|
This keeps the optimization loop fast:
|
||||||
|
- No interactive sessions
|
||||||
|
- No license seat occupation during solver runs
|
||||||
|
- Batch processing of hundreds of trials
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 7: OPTIMIZATION ALGORITHMS
|
||||||
|
|
||||||
|
## IMSO: Intelligent Multi-Strategy Optimization
|
||||||
|
|
||||||
|
Instead of asking "which algorithm should I use?", IMSO **characterizes your problem and selects automatically**.
|
||||||
|
|
||||||
|
### The Two-Phase Process
|
||||||
|
|
||||||
|
**Phase 1: Characterization (10-30 trials)**
|
||||||
|
- Unbiased sampling (Random or Sobol)
|
||||||
|
- Compute landscape metrics every 5 trials
|
||||||
|
- Stop when confidence reaches 85%
|
||||||
|
|
||||||
|
**Phase 2: Optimized Search**
|
||||||
|
- Algorithm selected based on landscape type:
|
||||||
|
- Smooth unimodal → CMA-ES or GP-BO
|
||||||
|
- Smooth multimodal → GP-BO
|
||||||
|
- Rugged → TPE
|
||||||
|
- Noisy → TPE (most robust)
|
||||||
|
|
||||||
|
### Performance Comparison
|
||||||
|
|
||||||
|
| Problem Type | Random Search | TPE Alone | IMSO |
|
||||||
|
|--------------|--------------|-----------|------|
|
||||||
|
| Smooth unimodal | 150 trials | 80 trials | **45 trials** |
|
||||||
|
| Rugged multimodal | 200 trials | 95 trials | **70 trials** |
|
||||||
|
| Mixed landscape | 180 trials | 100 trials | **56 trials** |
|
||||||
|
|
||||||
|
**Average improvement: 40% fewer trials to convergence**
|
||||||
|
|
||||||
|
## Multi-Objective: NSGA-II
|
||||||
|
|
||||||
|
For problems with competing objectives (mass vs. stiffness, cost vs. performance):
|
||||||
|
- Full Pareto front discovery
|
||||||
|
- Hypervolume tracking for solution quality
|
||||||
|
- Interactive Pareto visualization in dashboard
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 8: NEURAL NETWORK ACCELERATION
|
||||||
|
|
||||||
|
## When FEA is Too Slow
|
||||||
|
|
||||||
|
Single FEA evaluation: 10-30 minutes
|
||||||
|
Exploring 1000 designs: 7-20 days
|
||||||
|
|
||||||
|
**Neural surrogates change this equation entirely.**
|
||||||
|
|
||||||
|
### Performance Comparison
|
||||||
|
|
||||||
|
| Metric | FEA | Neural Network | Speedup |
|
||||||
|
|--------|-----|----------------|---------|
|
||||||
|
| Time per evaluation | 20 min | **4.5 ms** | **266,000x** |
|
||||||
|
| Trials per day | 72 | **19 million** | **263,000x** |
|
||||||
|
| Design exploration | Limited | **Comprehensive** | - |
|
||||||
|
|
||||||
|
### Two Approaches
|
||||||
|
|
||||||
|
**1. MLP Surrogate (Simple, Fast to Train)**
|
||||||
|
- 4-layer network, ~34K parameters
|
||||||
|
- Train on 50-100 FEA samples
|
||||||
|
- 1-5% error for most objectives
|
||||||
|
- Best for: Quick studies, smooth objectives
|
||||||
|
|
||||||
|
**2. Zernike GNN (Physics-Aware, High Accuracy)**
|
||||||
|
- Graph neural network with 1.2M parameters
|
||||||
|
- Predicts full displacement fields
|
||||||
|
- Differentiable Zernike fitting
|
||||||
|
- Best for: Mirror optimization, optical surfaces
|
||||||
|
|
||||||
|
### Turbo Mode Workflow
|
||||||
|
|
||||||
|
```
|
||||||
|
REPEAT until converged:
|
||||||
|
1. Run 5,000 neural predictions (~1 second)
|
||||||
|
2. Select top 5 diverse candidates
|
||||||
|
3. FEA validate those 5 (~25 minutes)
|
||||||
|
4. Retrain neural network with new data
|
||||||
|
5. Check for convergence
|
||||||
|
```
|
||||||
|
|
||||||
|
**Result:** 50 FEA runs explore what would take 1000+ trials traditionally.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 9: SELF-AWARE TURBO (SAT) - VALIDATED BREAKTHROUGH
|
||||||
|
|
||||||
|
## The Problem: Surrogates That Don't Know When They're Wrong
|
||||||
|
|
||||||
|
Traditional neural surrogates have a fatal flaw: **they're confidently wrong in unexplored regions**.
|
||||||
|
|
||||||
|
In V5, we trained an MLP on 129 FEA samples and ran L-BFGS gradient descent on the surrogate. It found a "minimum" at WS=280. We ran FEA. The actual result: WS=376 - a **30%+ error**.
|
||||||
|
|
||||||
|
The surrogate had descended to a region with no training data and predicted with perfect confidence. L-BFGS loves smooth surfaces, and the MLP happily provided one - completely fabricated.
|
||||||
|
|
||||||
|
**Root cause:** The surrogate doesn't know what it doesn't know.
|
||||||
|
|
||||||
|
## The Solution: Self-Aware Turbo (SAT)
|
||||||
|
|
||||||
|
SAT v3 achieved **WS=205.58**, beating all previous methods (V7 TPE: 218.26, V6 TPE: 225.41).
|
||||||
|
|
||||||
|
### Core Principles
|
||||||
|
|
||||||
|
1. **Never trust a point prediction** - Always require uncertainty bounds
|
||||||
|
2. **High uncertainty = run FEA** - Don't optimize where you don't know
|
||||||
|
3. **Actively fill gaps** - Prioritize FEA in high-uncertainty regions
|
||||||
|
4. **Validate gradient solutions** - Check L-BFGS results before trusting
|
||||||
|
|
||||||
|
### Key Innovations
|
||||||
|
|
||||||
|
**1. Ensemble Surrogate (Epistemic Uncertainty)**
|
||||||
|
|
||||||
|
Instead of one MLP, train **5 independent models** with different initializations:
|
||||||
|
|
||||||
|
```python
|
||||||
|
class EnsembleSurrogate:
|
||||||
|
def predict(self, x):
|
||||||
|
preds = [m.predict(x) for m in self.models]
|
||||||
|
mean = np.mean(preds, axis=0)
|
||||||
|
std = np.std(preds, axis=0) # Epistemic uncertainty!
|
||||||
|
return mean, std
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this works:** Models trained on different seeds agree in well-sampled regions but **disagree wildly in extrapolation regions**.
|
||||||
|
|
||||||
|
**2. Distance-Based Out-of-Distribution Detection**
|
||||||
|
|
||||||
|
Track training data distribution and flag points that are "too far":
|
||||||
|
|
||||||
|
```python
|
||||||
|
def is_in_distribution(self, x, threshold=2.0):
|
||||||
|
"""Check if point is within 2 std of training data."""
|
||||||
|
z_scores = np.abs((x - self.mean) / (self.std + 1e-6))
|
||||||
|
return z_scores.max() < threshold
|
||||||
|
```
|
||||||
|
|
||||||
|
**3. Adaptive Exploration Schedule**
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_exploration_weight(trial_num):
|
||||||
|
if trial_num <= 30: return 0.15 # Phase 1: 15% exploration
|
||||||
|
elif trial_num <= 80: return 0.08 # Phase 2: 8% exploration
|
||||||
|
else: return 0.03 # Phase 3: 3% exploitation
|
||||||
|
```
|
||||||
|
|
||||||
|
**4. Soft Mass Constraints in Acquisition**
|
||||||
|
|
||||||
|
```python
|
||||||
|
mass_penalty = max(0, pred_mass - 118.0) * 5.0 # Soft threshold at 118 kg
|
||||||
|
acquisition = norm_ws - exploration_weight * norm_dist + norm_mass_penalty
|
||||||
|
```
|
||||||
|
|
||||||
|
### SAT Version History
|
||||||
|
|
||||||
|
| Version | Training Data | Key Fix | Best WS |
|
||||||
|
|---------|---------------|---------|---------|
|
||||||
|
| v1 | 129 samples | - | 218.26 |
|
||||||
|
| v2 | 196 samples | Duplicate prevention | 271.38 (regression!) |
|
||||||
|
| **v3** | **556 samples (V5-V8)** | **Adaptive exploration + mass targeting** | **205.58** |
|
||||||
|
|
||||||
|
### V9 Results (SAT v3)
|
||||||
|
|
||||||
|
| Phase | Trials | Best WS | Mean WS |
|
||||||
|
|-------|--------|---------|---------|
|
||||||
|
| Phase 1 (explore) | 30 | 232.00 | 394.48 |
|
||||||
|
| Phase 2 (balanced) | 50 | 222.01 | 360.51 |
|
||||||
|
| Phase 3 (exploit) | 57+ | **205.58** | 262.57 |
|
||||||
|
|
||||||
|
**Key metrics:**
|
||||||
|
- 100% feasibility rate
|
||||||
|
- 100% unique designs (no duplicates)
|
||||||
|
- Surrogate R² = 0.99
|
||||||
|
|
||||||
|
### When to Use SAT vs Pure TPE
|
||||||
|
|
||||||
|
| Scenario | Recommendation |
|
||||||
|
|----------|----------------|
|
||||||
|
| < 100 existing samples | Pure TPE (not enough for good surrogate) |
|
||||||
|
| 100-500 samples | SAT Phase 1-2 only (no L-BFGS) |
|
||||||
|
| > 500 samples | Full SAT with L-BFGS refinement |
|
||||||
|
| High-dimensional (>20 params) | Pure TPE (curse of dimensionality) |
|
||||||
|
| Noisy FEA | Pure TPE (surrogates struggle with noise) |
|
||||||
|
|
||||||
|
### The Core Insight
|
||||||
|
|
||||||
|
> "A surrogate that knows when it doesn't know is infinitely more valuable than one that's confidently wrong."
|
||||||
|
|
||||||
|
SAT doesn't just optimize faster - it **optimizes safer**. Every prediction comes with uncertainty bounds. Every gradient step is validated. Every extrapolation is flagged.
|
||||||
|
|
||||||
|
This is the difference between a tool that works in demos and a system that works in production.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 10: THE EXTRACTOR LIBRARY
|
||||||
|
|
||||||
|
## 24 Physics Extractors
|
||||||
|
|
||||||
|
Every extractor follows the same pattern: verified API calls, robust error handling, documented sources.
|
||||||
|
|
||||||
|
| ID | Physics | Function | Output |
|
||||||
|
|----|---------|----------|--------|
|
||||||
|
| E1 | Displacement | `extract_displacement()` | mm |
|
||||||
|
| E2 | Frequency | `extract_frequency()` | Hz |
|
||||||
|
| E3 | Von Mises Stress | `extract_solid_stress()` | MPa |
|
||||||
|
| E4-E5 | Mass | BDF or CAD-based | kg |
|
||||||
|
| E8-E10 | Zernike WFE | Standard, relative, builder | nm |
|
||||||
|
| E12-E14 | Advanced Stress | Principal, strain energy, SPC | MPa, J, N |
|
||||||
|
| E15-E17 | Thermal | Temperature, gradient, flux | K, K/mm, W/mm² |
|
||||||
|
| E18 | Modal Mass | From F06 | kg |
|
||||||
|
| E19 | Part Introspection | Full part analysis | dict |
|
||||||
|
| E20-E22 | Zernike OPD | Analytic, comparison, figure | nm |
|
||||||
|
|
||||||
|
### The 20-Line Rule
|
||||||
|
|
||||||
|
If you're writing more than 20 lines of extraction code in your study, you're probably:
|
||||||
|
1. Duplicating existing functionality
|
||||||
|
2. In need of a proper extractor
|
||||||
|
|
||||||
|
**Always check the library first. If it doesn't exist, propose a new extractor through the protocol evolution workflow.**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 11: DASHBOARD & VISUALIZATION
|
||||||
|
|
||||||
|
## Real-Time Monitoring
|
||||||
|
|
||||||
|
**React + TypeScript + Plotly.js**
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- **Parallel coordinates:** See all design variables and objectives simultaneously
|
||||||
|
- **Pareto front:** 2D/3D visualization of multi-objective trade-offs
|
||||||
|
- **Convergence tracking:** Best-so-far with individual trial scatter
|
||||||
|
- **WebSocket updates:** Live as optimization runs
|
||||||
|
|
||||||
|
### Report Generation
|
||||||
|
|
||||||
|
Automatic markdown reports with:
|
||||||
|
- Study configuration and objectives
|
||||||
|
- Best result with performance metrics
|
||||||
|
- Convergence plots (300 DPI, publication-ready)
|
||||||
|
- Top trials table
|
||||||
|
- Full history (collapsible)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 12: STATISTICS & METRICS
|
||||||
|
|
||||||
|
## Codebase
|
||||||
|
|
||||||
|
| Component | Lines of Code |
|
||||||
|
|-----------|---------------|
|
||||||
|
| Optimization Engine (Python) | **66,204** |
|
||||||
|
| Dashboard (TypeScript) | **54,871** |
|
||||||
|
| Documentation | 999 files |
|
||||||
|
| **Total** | **~120,000+** |
|
||||||
|
|
||||||
|
## Performance
|
||||||
|
|
||||||
|
| Metric | Value |
|
||||||
|
|--------|-------|
|
||||||
|
| Neural inference | **4.5 ms** per trial |
|
||||||
|
| Turbo throughput | **5,000-7,000 trials/sec** |
|
||||||
|
| GNN R² accuracy | **0.95-0.99** |
|
||||||
|
| IMSO improvement | **40% fewer trials** |
|
||||||
|
|
||||||
|
## Coverage
|
||||||
|
|
||||||
|
- **24 physics extractors**
|
||||||
|
- **6+ optimization algorithms**
|
||||||
|
- **7 Nastran solution types** (SOL 101, 103, 105, 106, 111, 112, 153/159)
|
||||||
|
- **3 neural surrogate types** (MLP, GNN, Ensemble)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# PART 13: KEY TAKEAWAYS
|
||||||
|
|
||||||
|
## What Makes Atomizer Different
|
||||||
|
|
||||||
|
1. **Study characterization** - Learn what works for each problem type
|
||||||
|
2. **Persistent memory (LAC)** - Never start from scratch
|
||||||
|
3. **Protocol evolution** - Safe, validated extensibility
|
||||||
|
4. **MCP-first development** - Documentation-driven, not guessing
|
||||||
|
5. **Simulation focus** - Not CAD, not mesh - optimization of simulation performance
|
||||||
|
6. **Self-aware surrogates (SAT)** - Know when predictions are uncertain, validated WS=205.58
|
||||||
|
7. **Interview Mode (NEW)** - Zero-config study creation through natural conversation
|
||||||
|
|
||||||
|
## Sound Bites for Podcast
|
||||||
|
|
||||||
|
- "Atomizer learns what works. After 100 studies, it knows that mirror problems need GP-BO, not TPE."
|
||||||
|
- "When we don't have an extractor, we query official NX documentation first - no guessing."
|
||||||
|
- "New capabilities go through research, review, and approval - just like engineering change orders."
|
||||||
|
- "4.5 milliseconds per prediction means we can explore 50,000 designs before lunch."
|
||||||
|
- "Every study makes the system smarter. That's not marketing - that's LAC."
|
||||||
|
- "SAT knows when it doesn't know. A surrogate that's confidently wrong is worse than no surrogate at all."
|
||||||
|
- "V5 surrogate said WS=280. FEA said WS=376. That's a 30% error from extrapolating into the unknown. SAT v3 fixed that - WS=205.58."
|
||||||
|
- "Just say 'create a study' and Atomizer interviews you. No JSON, no manuals, just conversation."
|
||||||
|
|
||||||
|
## The Core Message
|
||||||
|
|
||||||
|
Atomizer is an **intelligent optimization platform** that:
|
||||||
|
- **Bridges** state-of-the-art algorithms and production FEA workflows
|
||||||
|
- **Learns** what works for different problem types
|
||||||
|
- **Grows** through structured protocol evolution
|
||||||
|
- **Accelerates** design exploration with neural surrogates
|
||||||
|
- **Documents** every decision for traceability
|
||||||
|
|
||||||
|
This isn't just automation - it's **accumulated engineering intelligence**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Atomizer: Where simulation expertise meets optimization science.*
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Document Statistics:**
|
||||||
|
- Sections: 13
|
||||||
|
- Focus: Simulation optimization (not CAD/mesh)
|
||||||
|
- Key additions: Study characterization, protocol evolution, MCP-first development, SAT v3, **Study Interview Mode**
|
||||||
|
- Positioning: Optimizer & NX configurator, not "LLM-first"
|
||||||
|
- SAT Performance: Validated WS=205.58 (best ever, beating V7 TPE at 218.26)
|
||||||
|
- Interview Mode: 129 tests passing, 12 materials, 12 anti-patterns, 7 phases
|
||||||
|
|
||||||
|
**Prepared for NotebookLM/AI Podcast Generation**
|
||||||
1172
docs/CONTEXT_ENGINEERING_REPORT.md
Normal file
1172
docs/CONTEXT_ENGINEERING_REPORT.md
Normal file
File diff suppressed because it is too large
Load Diff
132
docs/TODO_NXOPEN_MCP_SETUP.md
Normal file
132
docs/TODO_NXOPEN_MCP_SETUP.md
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
# NXOpen Documentation MCP Server - Setup TODO
|
||||||
|
|
||||||
|
**Created:** 2025-12-29
|
||||||
|
**Status:** PENDING - Waiting for manual configuration
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
The NXOpen documentation MCP server exists on **dalidou** (192.168.86.50) but is not accessible from this Windows machine due to hostname resolution issues.
|
||||||
|
|
||||||
|
### What's Working
|
||||||
|
- ✅ Dalidou server is online and reachable at `192.168.86.50`
|
||||||
|
- ✅ Port 5000 (Documentation Proxy) is responding
|
||||||
|
- ✅ Port 3000 (Gitea) is responding
|
||||||
|
- ✅ MCP server code exists at `/srv/claude-assistant/` on dalidou
|
||||||
|
|
||||||
|
### What's NOT Working
|
||||||
|
- ❌ `dalidou.local` hostname doesn't resolve (mDNS not configured on this machine)
|
||||||
|
- ❌ MCP tools not integrated with Claude Code
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Steps to Complete
|
||||||
|
|
||||||
|
### Step 1: Fix Hostname Resolution (Manual - requires Admin)
|
||||||
|
|
||||||
|
**Option A: Run the script as Administrator**
|
||||||
|
```powershell
|
||||||
|
# Open PowerShell as Administrator, then:
|
||||||
|
C:\Users\antoi\Atomizer\add_dalidou_host.ps1
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option B: Manually edit hosts file**
|
||||||
|
1. Open Notepad as Administrator
|
||||||
|
2. Open `C:\Windows\System32\drivers\etc\hosts`
|
||||||
|
3. Add this line at the end:
|
||||||
|
```
|
||||||
|
192.168.86.50 dalidou.local dalidou
|
||||||
|
```
|
||||||
|
4. Save the file
|
||||||
|
|
||||||
|
**Verify:**
|
||||||
|
```powershell
|
||||||
|
ping dalidou.local
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Verify MCP Server is Running on Dalidou
|
||||||
|
|
||||||
|
SSH into dalidou and check:
|
||||||
|
```bash
|
||||||
|
ssh root@dalidou
|
||||||
|
|
||||||
|
# Check documentation proxy
|
||||||
|
systemctl status siemensdocumentationproxyserver
|
||||||
|
|
||||||
|
# Check MCP server (if it's a service)
|
||||||
|
# Or check what's running on port 5000
|
||||||
|
ss -tlnp | grep 5000
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Configure Claude Code MCP Integration
|
||||||
|
|
||||||
|
The MCP server on dalidou uses **stdio-based MCP protocol**, not HTTP. To connect from Claude Code, you'll need one of:
|
||||||
|
|
||||||
|
**Option A: SSH-based MCP (if supported)**
|
||||||
|
Configure in `.claude/settings.json` or MCP config to connect via SSH tunnel.
|
||||||
|
|
||||||
|
**Option B: Local Proxy**
|
||||||
|
Run a local MCP proxy that connects to dalidou's MCP server.
|
||||||
|
|
||||||
|
**Option C: HTTP Wrapper**
|
||||||
|
The current port 5000 service may already expose HTTP endpoints - need to verify once hostname is fixed.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Server Documentation Reference
|
||||||
|
|
||||||
|
Full documentation is in the SERVtomaste repo:
|
||||||
|
- **URL:** http://192.168.86.50:3000/Antoine/SERVtomaste
|
||||||
|
- **File:** `docs/SIEMENS-DOCS-SERVER.md`
|
||||||
|
|
||||||
|
### Key Server Paths (on dalidou)
|
||||||
|
```
|
||||||
|
/srv/siemens-docs/proxy/ # Documentation Proxy (port 5000)
|
||||||
|
/srv/claude-assistant/ # MCP Server
|
||||||
|
/srv/claude-assistant/mcp-server/ # MCP server code
|
||||||
|
/srv/claude-assistant/tools/ # Tool implementations
|
||||||
|
├── siemens-auth.js # Puppeteer authentication
|
||||||
|
├── siemens-docs.js # Documentation fetching
|
||||||
|
└── ...
|
||||||
|
/srv/claude-assistant/vault/ # Credentials (secured)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Available MCP Tools (once connected)
|
||||||
|
| Tool | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `siemens_docs_search` | Search NX Open, Simcenter docs |
|
||||||
|
| `siemens_docs_fetch` | Fetch specific documentation page |
|
||||||
|
| `siemens_auth_status` | Check if auth session is active |
|
||||||
|
| `siemens_login` | Re-login if session expired |
|
||||||
|
| `siemens_docs_list` | List documentation categories |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Files Created During Investigation
|
||||||
|
|
||||||
|
- `C:\Users\antoi\Atomizer\add_dalidou_host.ps1` - Script to add hosts entry (run as Admin)
|
||||||
|
- `C:\Users\antoi\Atomizer\test_mcp.py` - Test script for probing MCP server (can be deleted)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- `.claude/skills/modules/nx-docs-lookup.md` - How to use MCP tools once configured
|
||||||
|
- `docs/08_ARCHIVE/historical/NXOPEN_DOCUMENTATION_INTEGRATION_STRATEGY.md` - Full strategy doc
|
||||||
|
- `docs/05_API_REFERENCE/NXOPEN_RESOURCES.md` - Alternative NXOpen resources
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Workaround Until Fixed
|
||||||
|
|
||||||
|
Without the MCP server, you can still look up NXOpen documentation by:
|
||||||
|
|
||||||
|
1. **Using web search** - I can search for NXOpen API documentation online
|
||||||
|
2. **Using local stub files** - Python stubs at `C:\Program Files\Siemens\NX2412\UGOPEN\pythonStubs\`
|
||||||
|
3. **Using existing extractors** - Check `optimization_engine/extractors/` for patterns
|
||||||
|
4. **Recording NX journals** - Record operations in NX to learn the API calls
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*To continue setup, run the hosts file fix and let me know when ready.*
|
||||||
948
docs/api/CONTEXT_ENGINEERING_API.md
Normal file
948
docs/api/CONTEXT_ENGINEERING_API.md
Normal file
@@ -0,0 +1,948 @@
|
|||||||
|
# Context Engineering API Reference
|
||||||
|
|
||||||
|
**Version**: 1.0
|
||||||
|
**Updated**: 2025-12-29
|
||||||
|
**Module**: `optimization_engine.context`
|
||||||
|
|
||||||
|
This document provides complete API documentation for the Atomizer Context Engineering (ACE) framework.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
1. [Module Overview](#module-overview)
|
||||||
|
2. [Core Classes](#core-classes)
|
||||||
|
- [AtomizerPlaybook](#atomizerplaybook)
|
||||||
|
- [PlaybookItem](#playbookitem)
|
||||||
|
- [InsightCategory](#insightcategory)
|
||||||
|
3. [Session Management](#session-management)
|
||||||
|
- [AtomizerSessionState](#atomizersessionstate)
|
||||||
|
- [ExposedState](#exposedstate)
|
||||||
|
- [IsolatedState](#isolatedstate)
|
||||||
|
- [TaskType](#tasktype)
|
||||||
|
4. [Analysis & Learning](#analysis--learning)
|
||||||
|
- [AtomizerReflector](#atomizerreflector)
|
||||||
|
- [FeedbackLoop](#feedbackloop)
|
||||||
|
5. [Optimization](#optimization)
|
||||||
|
- [CompactionManager](#compactionmanager)
|
||||||
|
- [ContextCacheOptimizer](#contextcacheoptimizer)
|
||||||
|
6. [Integration](#integration)
|
||||||
|
- [ContextEngineeringMixin](#contextengineeringmixin)
|
||||||
|
- [ContextAwareRunner](#contextawarerunner)
|
||||||
|
7. [REST API](#rest-api)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Module Overview
|
||||||
|
|
||||||
|
### Import Patterns
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Full import
|
||||||
|
from optimization_engine.context import (
|
||||||
|
# Core playbook
|
||||||
|
AtomizerPlaybook,
|
||||||
|
PlaybookItem,
|
||||||
|
InsightCategory,
|
||||||
|
|
||||||
|
# Session management
|
||||||
|
AtomizerSessionState,
|
||||||
|
ExposedState,
|
||||||
|
IsolatedState,
|
||||||
|
TaskType,
|
||||||
|
get_session,
|
||||||
|
|
||||||
|
# Analysis
|
||||||
|
AtomizerReflector,
|
||||||
|
OptimizationOutcome,
|
||||||
|
InsightCandidate,
|
||||||
|
|
||||||
|
# Learning
|
||||||
|
FeedbackLoop,
|
||||||
|
FeedbackLoopFactory,
|
||||||
|
|
||||||
|
# Optimization
|
||||||
|
CompactionManager,
|
||||||
|
ContextEvent,
|
||||||
|
EventType,
|
||||||
|
ContextBudgetManager,
|
||||||
|
ContextCacheOptimizer,
|
||||||
|
CacheStats,
|
||||||
|
StablePrefixBuilder,
|
||||||
|
|
||||||
|
# Integration
|
||||||
|
ContextEngineeringMixin,
|
||||||
|
ContextAwareRunner,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Convenience imports
|
||||||
|
from optimization_engine.context import AtomizerPlaybook, get_session
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Core Classes
|
||||||
|
|
||||||
|
### AtomizerPlaybook
|
||||||
|
|
||||||
|
The central knowledge store for persistent learning across sessions.
|
||||||
|
|
||||||
|
#### Constructor
|
||||||
|
|
||||||
|
```python
|
||||||
|
AtomizerPlaybook(
|
||||||
|
items: Dict[str, PlaybookItem] = None,
|
||||||
|
version: int = 1,
|
||||||
|
created_at: str = None,
|
||||||
|
last_updated: str = None
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Class Methods
|
||||||
|
|
||||||
|
##### `load(path: Path) -> AtomizerPlaybook`
|
||||||
|
Load playbook from JSON file.
|
||||||
|
|
||||||
|
```python
|
||||||
|
playbook = AtomizerPlaybook.load(Path("knowledge_base/playbook.json"))
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `path`: Path to JSON file
|
||||||
|
|
||||||
|
**Returns:** AtomizerPlaybook instance
|
||||||
|
|
||||||
|
**Raises:** Nothing for a missing file — if `path` does not exist, a new empty playbook is created instead of raising `FileNotFoundError`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### Instance Methods
|
||||||
|
|
||||||
|
##### `save(path: Path) -> None`
|
||||||
|
Save playbook to JSON file.
|
||||||
|
|
||||||
|
```python
|
||||||
|
playbook.save(Path("knowledge_base/playbook.json"))
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `add_insight(category, content, source_trial=None, tags=None) -> PlaybookItem`
|
||||||
|
Add a new insight to the playbook.
|
||||||
|
|
||||||
|
```python
|
||||||
|
item = playbook.add_insight(
|
||||||
|
category=InsightCategory.STRATEGY,
|
||||||
|
content="CMA-ES converges faster on smooth surfaces",
|
||||||
|
source_trial=42,
|
||||||
|
tags=["sampler", "convergence", "mirror"]
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `category` (InsightCategory): Category of the insight
|
||||||
|
- `content` (str): The insight content
|
||||||
|
- `source_trial` (int, optional): Trial number that generated this insight
|
||||||
|
- `tags` (List[str], optional): Tags for filtering
|
||||||
|
|
||||||
|
**Returns:** The created PlaybookItem
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `record_outcome(item_id: str, helpful: bool) -> None`
|
||||||
|
Record whether an insight was helpful or harmful.
|
||||||
|
|
||||||
|
```python
|
||||||
|
playbook.record_outcome("str_001", helpful=True)
|
||||||
|
playbook.record_outcome("mis_003", helpful=False)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `item_id` (str): ID of the playbook item
|
||||||
|
- `helpful` (bool): True if helpful, False if harmful
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `get_context_for_task(task_type, max_items=15, min_confidence=0.5) -> str`
|
||||||
|
Get formatted context string for LLM consumption.
|
||||||
|
|
||||||
|
```python
|
||||||
|
context = playbook.get_context_for_task(
|
||||||
|
task_type="optimization",
|
||||||
|
max_items=15,
|
||||||
|
min_confidence=0.5
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `task_type` (str): Type of task for filtering
|
||||||
|
- `max_items` (int): Maximum items to include
|
||||||
|
- `min_confidence` (float): Minimum confidence threshold (0.0-1.0)
|
||||||
|
|
||||||
|
**Returns:** Formatted string suitable for LLM context
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `get_by_category(category, min_score=0) -> List[PlaybookItem]`
|
||||||
|
Get items filtered by category.
|
||||||
|
|
||||||
|
```python
|
||||||
|
mistakes = playbook.get_by_category(InsightCategory.MISTAKE, min_score=-2)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `category` (InsightCategory): Category to filter by
|
||||||
|
- `min_score` (int): Minimum net score
|
||||||
|
|
||||||
|
**Returns:** List of matching PlaybookItems
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `get_stats() -> Dict`
|
||||||
|
Get playbook statistics.
|
||||||
|
|
||||||
|
```python
|
||||||
|
stats = playbook.get_stats()
|
||||||
|
# Returns:
|
||||||
|
# {
|
||||||
|
# "total_items": 45,
|
||||||
|
# "by_category": {"STRATEGY": 12, "MISTAKE": 8, ...},
|
||||||
|
# "version": 3,
|
||||||
|
# "last_updated": "2025-12-29T10:30:00",
|
||||||
|
# "avg_score": 2.4,
|
||||||
|
# "max_score": 15,
|
||||||
|
# "min_score": -3
|
||||||
|
# }
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `prune_harmful(threshold=-3) -> int`
|
||||||
|
Remove items with net score below threshold.
|
||||||
|
|
||||||
|
```python
|
||||||
|
removed_count = playbook.prune_harmful(threshold=-3)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `threshold` (int): Items with net_score <= threshold are removed
|
||||||
|
|
||||||
|
**Returns:** Number of items removed
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PlaybookItem
|
||||||
|
|
||||||
|
Dataclass representing a single playbook entry.
|
||||||
|
|
||||||
|
```python
|
||||||
|
@dataclass
|
||||||
|
class PlaybookItem:
|
||||||
|
id: str # e.g., "str_001", "mis_003"
|
||||||
|
category: InsightCategory # Category enum
|
||||||
|
content: str # The insight text
|
||||||
|
helpful_count: int = 0 # Times marked helpful
|
||||||
|
harmful_count: int = 0 # Times marked harmful
|
||||||
|
tags: List[str] = field(default_factory=list)
|
||||||
|
source_trial: Optional[int] = None
|
||||||
|
created_at: str = "" # ISO timestamp
|
||||||
|
last_used: Optional[str] = None # ISO timestamp
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Properties
|
||||||
|
|
||||||
|
```python
|
||||||
|
item.net_score # helpful_count - harmful_count
|
||||||
|
item.confidence # helpful / (helpful + harmful), or 0.5 if no feedback
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Methods
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Convert to context string for LLM
|
||||||
|
context_str = item.to_context_string()
|
||||||
|
# "[str_001] helpful=5 harmful=0 :: CMA-ES converges faster..."
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### InsightCategory
|
||||||
|
|
||||||
|
Enum for categorizing insights.
|
||||||
|
|
||||||
|
```python
|
||||||
|
class InsightCategory(Enum):
|
||||||
|
STRATEGY = "str" # Optimization strategies that work
|
||||||
|
CALCULATION = "cal" # Formulas and calculations
|
||||||
|
MISTAKE = "mis" # Common mistakes to avoid
|
||||||
|
TOOL = "tool" # Tool usage patterns
|
||||||
|
DOMAIN = "dom" # Domain-specific knowledge (FEA, NX)
|
||||||
|
WORKFLOW = "wf" # Workflow patterns
|
||||||
|
```
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
```python
|
||||||
|
# Create with enum
|
||||||
|
category = InsightCategory.STRATEGY
|
||||||
|
|
||||||
|
# Create from string
|
||||||
|
category = InsightCategory("str")
|
||||||
|
|
||||||
|
# Get string value
|
||||||
|
value = InsightCategory.STRATEGY.value # "str"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Session Management
|
||||||
|
|
||||||
|
### AtomizerSessionState
|
||||||
|
|
||||||
|
Manages session context with exposed/isolated separation.
|
||||||
|
|
||||||
|
#### Constructor
|
||||||
|
|
||||||
|
```python
|
||||||
|
AtomizerSessionState(
    session_id: str = None  # Auto-generated UUID if not provided
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Attributes
|
||||||
|
|
||||||
|
```python
|
||||||
|
session.session_id # Unique session identifier
|
||||||
|
session.exposed # ExposedState - always in LLM context
|
||||||
|
session.isolated # IsolatedState - on-demand access only
|
||||||
|
session.last_updated # ISO timestamp of last update
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Methods
|
||||||
|
|
||||||
|
##### `get_llm_context() -> str`
|
||||||
|
Get exposed state formatted for LLM context.
|
||||||
|
|
||||||
|
```python
|
||||||
|
context = session.get_llm_context()
|
||||||
|
# Returns formatted string with task type, study info, progress, etc.
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `add_action(action: str) -> None`
|
||||||
|
Record an action (keeps last 20).
|
||||||
|
|
||||||
|
```python
|
||||||
|
session.add_action("Started optimization with TPE sampler")
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `add_error(error: str, error_type: str = None) -> None`
|
||||||
|
Record an error (keeps last 10).
|
||||||
|
|
||||||
|
```python
|
||||||
|
session.add_error("NX solver timeout after 600s", error_type="solver")
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `to_dict() / from_dict(data) -> AtomizerSessionState`
|
||||||
|
Serialize/deserialize session state.
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Save
|
||||||
|
data = session.to_dict()
|
||||||
|
|
||||||
|
# Restore
|
||||||
|
session = AtomizerSessionState.from_dict(data)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### ExposedState
|
||||||
|
|
||||||
|
State that's always included in LLM context.
|
||||||
|
|
||||||
|
```python
|
||||||
|
@dataclass
|
||||||
|
class ExposedState:
|
||||||
|
task_type: Optional[TaskType] = None
|
||||||
|
study_name: Optional[str] = None
|
||||||
|
study_status: str = "idle"
|
||||||
|
trials_completed: int = 0
|
||||||
|
trials_total: int = 0
|
||||||
|
best_value: Optional[float] = None
|
||||||
|
recent_actions: List[str] = field(default_factory=list) # Last 20
|
||||||
|
recent_errors: List[str] = field(default_factory=list) # Last 10
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### IsolatedState
|
||||||
|
|
||||||
|
State available on-demand but not in default context.
|
||||||
|
|
||||||
|
```python
|
||||||
|
@dataclass
|
||||||
|
class IsolatedState:
|
||||||
|
full_trial_history: List[Dict] = field(default_factory=list)
|
||||||
|
detailed_errors: List[Dict] = field(default_factory=list)
|
||||||
|
performance_metrics: Dict = field(default_factory=dict)
|
||||||
|
debug_info: Dict = field(default_factory=dict)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### TaskType
|
||||||
|
|
||||||
|
Enum for session task classification.
|
||||||
|
|
||||||
|
```python
|
||||||
|
class TaskType(Enum):
|
||||||
|
CREATE_STUDY = "create_study"
|
||||||
|
RUN_OPTIMIZATION = "run_optimization"
|
||||||
|
MONITOR_PROGRESS = "monitor_progress"
|
||||||
|
ANALYZE_RESULTS = "analyze_results"
|
||||||
|
DEBUG_ERROR = "debug_error"
|
||||||
|
CONFIGURE_SETTINGS = "configure_settings"
|
||||||
|
NEURAL_ACCELERATION = "neural_acceleration"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### get_session()
|
||||||
|
|
||||||
|
Get or create the global session instance.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import get_session
|
||||||
|
|
||||||
|
session = get_session()
|
||||||
|
session.exposed.task_type = TaskType.RUN_OPTIMIZATION
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Analysis & Learning
|
||||||
|
|
||||||
|
### AtomizerReflector
|
||||||
|
|
||||||
|
Analyzes optimization outcomes and extracts insights.
|
||||||
|
|
||||||
|
#### Constructor
|
||||||
|
|
||||||
|
```python
|
||||||
|
AtomizerReflector(playbook: AtomizerPlaybook)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Methods
|
||||||
|
|
||||||
|
##### `analyze_outcome(outcome: OptimizationOutcome) -> List[InsightCandidate]`
|
||||||
|
Analyze an optimization outcome for insights.
|
||||||
|
|
||||||
|
```python
|
||||||
|
outcome = OptimizationOutcome(
|
||||||
|
study_name="bracket_v3",
|
||||||
|
trial_number=42,
|
||||||
|
params={'thickness': 10.5},
|
||||||
|
objectives={'mass': 5.2},
|
||||||
|
constraints_satisfied=True,
|
||||||
|
error_message=None,
|
||||||
|
solve_time=45.2
|
||||||
|
)
|
||||||
|
|
||||||
|
insights = reflector.analyze_outcome(outcome)
|
||||||
|
for insight in insights:
|
||||||
|
print(f"{insight.category}: {insight.content}")
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `extract_error_insights(error_message: str) -> List[InsightCandidate]`
|
||||||
|
Extract insights from error messages.
|
||||||
|
|
||||||
|
```python
|
||||||
|
insights = reflector.extract_error_insights("Solution did not converge within tolerance")
|
||||||
|
# Returns insights about convergence failures
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### OptimizationOutcome
|
||||||
|
|
||||||
|
Dataclass for optimization trial outcomes.
|
||||||
|
|
||||||
|
```python
|
||||||
|
@dataclass
|
||||||
|
class OptimizationOutcome:
|
||||||
|
study_name: str
|
||||||
|
trial_number: int
|
||||||
|
params: Dict[str, Any]
|
||||||
|
objectives: Dict[str, float]
|
||||||
|
constraints_satisfied: bool
|
||||||
|
error_message: Optional[str] = None
|
||||||
|
solve_time: Optional[float] = None
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### FeedbackLoop
|
||||||
|
|
||||||
|
Automated learning from optimization execution.
|
||||||
|
|
||||||
|
#### Constructor
|
||||||
|
|
||||||
|
```python
|
||||||
|
FeedbackLoop(playbook_path: Path)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Methods
|
||||||
|
|
||||||
|
##### `process_trial_result(trial_number, params, objectives, is_feasible, error=None)`
|
||||||
|
Process a trial result for learning opportunities.
|
||||||
|
|
||||||
|
```python
|
||||||
|
feedback.process_trial_result(
|
||||||
|
trial_number=42,
|
||||||
|
params={'thickness': 10.5, 'width': 25.0},
|
||||||
|
objectives={'mass': 5.2, 'stress': 180.0},
|
||||||
|
is_feasible=True,
|
||||||
|
error=None
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `finalize_study(study_summary: Dict) -> Dict`
|
||||||
|
Finalize learning at end of optimization study.
|
||||||
|
|
||||||
|
```python
|
||||||
|
result = feedback.finalize_study({
|
||||||
|
"name": "bracket_v3",
|
||||||
|
"total_trials": 100,
|
||||||
|
"best_value": 4.8,
|
||||||
|
"convergence_rate": 0.95
|
||||||
|
})
|
||||||
|
# Returns: {"insights_added": 3, "patterns_identified": ["fast_convergence"]}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Optimization
|
||||||
|
|
||||||
|
### CompactionManager
|
||||||
|
|
||||||
|
Handles context compaction for long-running sessions.
|
||||||
|
|
||||||
|
#### Constructor
|
||||||
|
|
||||||
|
```python
|
||||||
|
CompactionManager(
    max_events: int = 100,
    preserve_errors: bool = True,
    preserve_milestones: bool = True
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Methods
|
||||||
|
|
||||||
|
##### `add_event(event: ContextEvent) -> None`
|
||||||
|
Add an event to the session history.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import ContextEvent, EventType
|
||||||
|
|
||||||
|
event = ContextEvent(
|
||||||
|
event_type=EventType.TRIAL_COMPLETE,
|
||||||
|
content="Trial 42 completed: mass=5.2kg",
|
||||||
|
timestamp=datetime.now().isoformat(),
|
||||||
|
is_error=False,
|
||||||
|
is_milestone=False
|
||||||
|
)
|
||||||
|
compactor.add_event(event)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `maybe_compact() -> Optional[str]`
|
||||||
|
Compact events if over threshold.
|
||||||
|
|
||||||
|
```python
|
||||||
|
summary = compactor.maybe_compact()
|
||||||
|
if summary:
|
||||||
|
print(f"Compacted: {summary}")
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `get_context() -> str`
|
||||||
|
Get current context string.
|
||||||
|
|
||||||
|
```python
|
||||||
|
context = compactor.get_context()
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### ContextCacheOptimizer
|
||||||
|
|
||||||
|
Monitors and optimizes KV-cache efficiency.
|
||||||
|
|
||||||
|
#### Constructor
|
||||||
|
|
||||||
|
```python
|
||||||
|
optimizer = ContextCacheOptimizer()
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Methods
|
||||||
|
|
||||||
|
##### `track_request(prefix_tokens: int, total_tokens: int)`
|
||||||
|
Track a request for cache analysis.
|
||||||
|
|
||||||
|
```python
|
||||||
|
optimizer.track_request(prefix_tokens=5000, total_tokens=15000)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `track_completion(success: bool, response_tokens: int)`
|
||||||
|
Track completion for performance analysis.
|
||||||
|
|
||||||
|
```python
|
||||||
|
optimizer.track_completion(success=True, response_tokens=500)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `get_stats_dict() -> Dict`
|
||||||
|
Get cache statistics.
|
||||||
|
|
||||||
|
```python
|
||||||
|
stats = optimizer.get_stats_dict()
|
||||||
|
# Returns:
|
||||||
|
# {
|
||||||
|
# "total_requests": 150,
|
||||||
|
# "cache_hits": 120,
|
||||||
|
# "cache_hit_rate": 0.8,
|
||||||
|
# "avg_prefix_ratio": 0.33,
|
||||||
|
# ...
|
||||||
|
# }
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
##### `get_report() -> str`
|
||||||
|
Get human-readable report.
|
||||||
|
|
||||||
|
```python
|
||||||
|
report = optimizer.get_report()
|
||||||
|
print(report)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Integration
|
||||||
|
|
||||||
|
### ContextEngineeringMixin
|
||||||
|
|
||||||
|
Mixin class for adding context engineering to optimization runners.
|
||||||
|
|
||||||
|
```python
|
||||||
|
class ContextEngineeringMixin:
|
||||||
|
def init_context_engineering(self, playbook_path: Path):
|
||||||
|
"""Initialize context engineering components."""
|
||||||
|
|
||||||
|
def record_trial_outcome(self, trial_number, params, objectives,
|
||||||
|
is_feasible, error=None):
|
||||||
|
"""Record trial outcome for learning."""
|
||||||
|
|
||||||
|
def get_context_for_llm(self) -> str:
|
||||||
|
"""Get combined context for LLM consumption."""
|
||||||
|
|
||||||
|
def finalize_context_engineering(self, study_summary: Dict):
|
||||||
|
"""Finalize learning at study completion."""
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### ContextAwareRunner
|
||||||
|
|
||||||
|
Pre-built runner with context engineering enabled.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import ContextAwareRunner
|
||||||
|
|
||||||
|
runner = ContextAwareRunner(
|
||||||
|
config=config_dict,
|
||||||
|
playbook_path=Path("knowledge_base/playbook.json")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Run optimization with automatic learning
|
||||||
|
runner.run()
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## REST API
|
||||||
|
|
||||||
|
The Context Engineering module exposes REST endpoints via FastAPI.
|
||||||
|
|
||||||
|
### Base URL
|
||||||
|
```
|
||||||
|
http://localhost:5000/api/context
|
||||||
|
```
|
||||||
|
|
||||||
|
### Endpoints
|
||||||
|
|
||||||
|
#### GET `/playbook`
|
||||||
|
Get playbook summary statistics.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"total_items": 45,
|
||||||
|
"by_category": {"STRATEGY": 12, "MISTAKE": 8},
|
||||||
|
"version": 3,
|
||||||
|
"last_updated": "2025-12-29T10:30:00",
|
||||||
|
"avg_score": 2.4,
|
||||||
|
"top_score": 15,
|
||||||
|
"lowest_score": -3
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### GET `/playbook/items`
|
||||||
|
List playbook items with optional filters.
|
||||||
|
|
||||||
|
**Query Parameters:**
|
||||||
|
- `category` (str): Filter by category (str, mis, tool, cal, dom, wf)
|
||||||
|
- `min_score` (int): Minimum net score (default: 0)
|
||||||
|
- `min_confidence` (float): Minimum confidence (default: 0.0)
|
||||||
|
- `limit` (int): Max items (default: 50)
|
||||||
|
- `offset` (int): Pagination offset (default: 0)
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "str_001",
|
||||||
|
"category": "str",
|
||||||
|
"content": "CMA-ES converges faster on smooth surfaces",
|
||||||
|
"helpful_count": 5,
|
||||||
|
"harmful_count": 0,
|
||||||
|
"net_score": 5,
|
||||||
|
"confidence": 1.0,
|
||||||
|
"tags": ["sampler", "convergence"],
|
||||||
|
"created_at": "2025-12-29T10:00:00",
|
||||||
|
"last_used": "2025-12-29T10:30:00"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### GET `/playbook/items/{item_id}`
|
||||||
|
Get a specific playbook item.
|
||||||
|
|
||||||
|
**Response:** Single PlaybookItemResponse object
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### POST `/playbook/feedback`
|
||||||
|
Record feedback on a playbook item.
|
||||||
|
|
||||||
|
**Request Body:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"item_id": "str_001",
|
||||||
|
"helpful": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"item_id": "str_001",
|
||||||
|
"new_score": 6,
|
||||||
|
"new_confidence": 1.0,
|
||||||
|
"helpful_count": 6,
|
||||||
|
"harmful_count": 0
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### POST `/playbook/insights`
|
||||||
|
Add a new insight.
|
||||||
|
|
||||||
|
**Request Body:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"category": "str",
|
||||||
|
"content": "New insight content",
|
||||||
|
"tags": ["tag1", "tag2"],
|
||||||
|
"source_trial": 42
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"item_id": "str_015",
|
||||||
|
"category": "str",
|
||||||
|
"content": "New insight content",
|
||||||
|
"message": "Insight added successfully"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### DELETE `/playbook/items/{item_id}`
|
||||||
|
Delete a playbook item.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"deleted": "str_001",
|
||||||
|
"content_preview": "CMA-ES converges faster..."
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### POST `/playbook/prune`
|
||||||
|
Remove harmful items.
|
||||||
|
|
||||||
|
**Query Parameters:**
|
||||||
|
- `threshold` (int): Net score threshold (default: -3)
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"items_pruned": 3,
|
||||||
|
"threshold_used": -3,
|
||||||
|
"remaining_items": 42
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### GET `/playbook/context`
|
||||||
|
Get playbook context for LLM consumption.
|
||||||
|
|
||||||
|
**Query Parameters:**
|
||||||
|
- `task_type` (str): Task type (default: "optimization")
|
||||||
|
- `max_items` (int): Maximum items (default: 15)
|
||||||
|
- `min_confidence` (float): Minimum confidence (default: 0.5)
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"context": "## Atomizer Knowledge Base\n...",
|
||||||
|
"items_included": 15,
|
||||||
|
"task_type": "optimization"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### GET `/session`
|
||||||
|
Get current session state.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"session_id": "abc123",
|
||||||
|
"task_type": "run_optimization",
|
||||||
|
"study_name": "bracket_v3",
|
||||||
|
"study_status": "running",
|
||||||
|
"trials_completed": 42,
|
||||||
|
"trials_total": 100,
|
||||||
|
"best_value": 5.2,
|
||||||
|
"recent_actions": ["Started optimization", "Trial 42 complete"],
|
||||||
|
"recent_errors": []
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### GET `/session/context`
|
||||||
|
Get session context for LLM consumption.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"context": "## Current Session\nTask: run_optimization\n...",
|
||||||
|
"session_id": "abc123",
|
||||||
|
"last_updated": "2025-12-29T10:30:00"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### GET `/cache/stats`
|
||||||
|
Get KV-cache statistics.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"stats": {
|
||||||
|
"total_requests": 150,
|
||||||
|
"cache_hits": 120,
|
||||||
|
"cache_hit_rate": 0.8
|
||||||
|
},
|
||||||
|
"report": "Cache Performance Report\n..."
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### GET `/learning/report`
|
||||||
|
Get comprehensive learning report.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"generated_at": "2025-12-29T10:30:00",
|
||||||
|
"playbook_stats": {...},
|
||||||
|
"top_performers": [
|
||||||
|
{"id": "str_001", "content": "...", "score": 15}
|
||||||
|
],
|
||||||
|
"worst_performers": [
|
||||||
|
{"id": "mis_003", "content": "...", "score": -2}
|
||||||
|
],
|
||||||
|
"recommendations": [
|
||||||
|
"Consider pruning 3 harmful items (net_score < -3)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
All API endpoints return appropriate HTTP status codes:
|
||||||
|
|
||||||
|
| Code | Meaning |
|
||||||
|
|------|---------|
|
||||||
|
| 200 | Success |
|
||||||
|
| 400 | Bad request (invalid parameters) |
|
||||||
|
| 404 | Not found (item doesn't exist) |
|
||||||
|
| 500 | Server error (module not available) |
|
||||||
|
|
||||||
|
Error response format:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"detail": "Error description"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## See Also
|
||||||
|
|
||||||
|
- [Context Engineering Report](../CONTEXT_ENGINEERING_REPORT.md) - Full implementation report
|
||||||
|
- [SYS_17 Protocol](../protocols/system/SYS_17_CONTEXT_ENGINEERING.md) - System protocol
|
||||||
|
- [Cheatsheet](../../.claude/skills/01_CHEATSHEET.md) - Quick reference
|
||||||
1786
docs/plans/ATOMIZER_CONTEXT_ENGINEERING_PLAN.md
Normal file
1786
docs/plans/ATOMIZER_CONTEXT_ENGINEERING_PLAN.md
Normal file
File diff suppressed because it is too large
Load Diff
1323
docs/plans/ATOMIZER_STUDY_INTERVIEW_MODE_IMPLEMENTATION_PLAN.md
Normal file
1323
docs/plans/ATOMIZER_STUDY_INTERVIEW_MODE_IMPLEMENTATION_PLAN.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,297 @@
|
|||||||
|
# Study Interview Mode - Implementation TODO
|
||||||
|
|
||||||
|
**Created**: 2026-01-02
|
||||||
|
**Source**: [ATOMIZER_STUDY_INTERVIEW_MODE_IMPLEMENTATION_PLAN.md](ATOMIZER_STUDY_INTERVIEW_MODE_IMPLEMENTATION_PLAN.md)
|
||||||
|
**Status**: COMPLETE - All Tasks Done
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This document tracks the Interview Mode implementation. **All core components have been implemented and tests pass (129/129).**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 1: Foundation - COMPLETE
|
||||||
|
|
||||||
|
### 1.1 Directory Structure Setup
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
**Files Created**:
|
||||||
|
```
|
||||||
|
optimization_engine/interview/
|
||||||
|
├── __init__.py
|
||||||
|
├── study_interview.py
|
||||||
|
├── question_engine.py
|
||||||
|
├── interview_state.py
|
||||||
|
├── interview_presenter.py
|
||||||
|
├── interview_intelligence.py
|
||||||
|
├── engineering_validator.py
|
||||||
|
├── study_blueprint.py
|
||||||
|
└── schemas/
|
||||||
|
├── interview_questions.json
|
||||||
|
├── materials_database.json
|
||||||
|
└── anti_patterns.json
|
||||||
|
|
||||||
|
tests/interview/
|
||||||
|
├── __init__.py
|
||||||
|
├── test_interview_state.py
|
||||||
|
├── test_question_engine.py
|
||||||
|
├── test_interview_presenter.py
|
||||||
|
├── test_engineering_validator.py
|
||||||
|
├── test_study_blueprint.py
|
||||||
|
└── test_study_interview.py
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 1.2 InterviewState Dataclass
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `interview_state.py`:
|
||||||
|
- InterviewState dataclass with all fields
|
||||||
|
- JSON serialization (to_json(), from_json())
|
||||||
|
- InterviewPhase enum with transitions
|
||||||
|
- Helper methods: is_complete(), progress_percentage(), add_warning(), etc.
|
||||||
|
- AnsweredQuestion and LogEntry dataclasses
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 1.3 InterviewStateManager
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `interview_state.py`:
|
||||||
|
- Directory creation (.interview/, .interview/backups/)
|
||||||
|
- Atomic save with backup rotation
|
||||||
|
- Lock file mechanism
|
||||||
|
- Log file appending (INTERVIEW_LOG.md)
|
||||||
|
- History tracking
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Question Engine - COMPLETE
|
||||||
|
|
||||||
|
### 2.1 Question Schema
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Created `schemas/interview_questions.json`:
|
||||||
|
- 17 questions across 7 categories
|
||||||
|
- Conditional logic definitions
|
||||||
|
- Dynamic option population support
|
||||||
|
- Engineering guidance per question
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2.2 QuestionEngine
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `question_engine.py`:
|
||||||
|
- Schema loading and parsing
|
||||||
|
- Conditional evaluation (and/or/not/equals/contains/introspection_has)
|
||||||
|
- Dynamic option population from introspection
|
||||||
|
- Answer validation
|
||||||
|
- Category ordering
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2.3 Interview Presenters
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `interview_presenter.py`:
|
||||||
|
- InterviewPresenter abstract base class
|
||||||
|
- ClaudePresenter (markdown formatting)
|
||||||
|
- DashboardPresenter (JSON events)
|
||||||
|
- CLIPresenter (plain text)
|
||||||
|
- Response parsing for all question types
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 3: Intelligence Layer - COMPLETE
|
||||||
|
|
||||||
|
### 3.1 ExtractorMapper
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `interview_intelligence.py`:
|
||||||
|
- GOAL_MAP for goal-to-extractor mapping
|
||||||
|
- Support for all extractors E1-E10
|
||||||
|
- Auto-assignment based on optimization goal
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.2 Materials Database
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Created `schemas/materials_database.json`:
|
||||||
|
- 12 common engineering materials
|
||||||
|
- Properties: yield stress, ultimate stress, density, modulus
|
||||||
|
- Safety factors by application
|
||||||
|
- Fuzzy name matching implemented
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.3 Anti-Pattern Detector
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Created `schemas/anti_patterns.json` and implemented in `engineering_validator.py`:
|
||||||
|
- 12 anti-pattern definitions
|
||||||
|
- Severity levels (error, warning, info)
|
||||||
|
- Fix suggestions
|
||||||
|
- Pattern detection logic
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.4 Engineering Validator
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `engineering_validator.py`:
|
||||||
|
- MaterialsDatabase class with fuzzy matching
|
||||||
|
- AntiPatternDetector class
|
||||||
|
- EngineeringValidator combining both
|
||||||
|
- Constraint validation (stress, displacement, frequency)
|
||||||
|
- Bounds suggestion
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3.5 Interview Intelligence
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `interview_intelligence.py`:
|
||||||
|
- Complexity determination (simple/moderate/complex)
|
||||||
|
- Question estimation
|
||||||
|
- Recommended settings generation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 4: Blueprint & Generation - COMPLETE
|
||||||
|
|
||||||
|
### 4.1 StudyBlueprint
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `study_blueprint.py`:
|
||||||
|
- DesignVariable, Objective, Constraint dataclasses
|
||||||
|
- StudyBlueprint with all configuration
|
||||||
|
- to_config_json() for optimization_config.json format
|
||||||
|
- to_markdown() for summary display
|
||||||
|
- Validation methods
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4.2 BlueprintBuilder
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `study_blueprint.py`:
|
||||||
|
- from_interview_state() method
|
||||||
|
- Automatic extractor assignment
|
||||||
|
- Trial count calculation
|
||||||
|
- Sampler selection
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4.3 StudyInterviewEngine
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Implemented in `study_interview.py`:
|
||||||
|
- Main orchestrator class
|
||||||
|
- start_interview() with resume support
|
||||||
|
- get_first_question() / process_answer() flow
|
||||||
|
- Warning acknowledgment
|
||||||
|
- Blueprint generation and modification
|
||||||
|
- State persistence
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 5: Integration - COMPLETE
|
||||||
|
|
||||||
|
### 5.1 Skill File
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Created `.claude/skills/modules/study-interview-mode.md`:
|
||||||
|
- Usage documentation
|
||||||
|
- Example conversation
|
||||||
|
- Integration guide
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5.2 Protocol Updates
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Completed:
|
||||||
|
- [x] Update OP_01_CREATE_STUDY.md with interview phase
|
||||||
|
- [x] Update 00_BOOTSTRAP.md task routing
|
||||||
|
- [x] Update CLAUDE.md with interview instructions
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 6: Testing - COMPLETE
|
||||||
|
|
||||||
|
### 6.1 Unit Tests
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
All tests pass: **129/129**
|
||||||
|
|
||||||
|
Test files created:
|
||||||
|
- test_interview_state.py (23 tests)
|
||||||
|
- test_question_engine.py (20 tests)
|
||||||
|
- test_interview_presenter.py (16 tests)
|
||||||
|
- test_engineering_validator.py (32 tests)
|
||||||
|
- test_study_blueprint.py (22 tests)
|
||||||
|
- test_study_interview.py (16 tests)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6.2 Integration Tests
|
||||||
|
**Status**: `[x]` COMPLETE
|
||||||
|
|
||||||
|
Integration tests in test_study_interview.py:
|
||||||
|
- Full interview flow
|
||||||
|
- Resume functionality
|
||||||
|
- Blueprint generation
|
||||||
|
- Warning handling
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
| Phase | Status | Completion |
|
||||||
|
|-------|--------|------------|
|
||||||
|
| 1. Foundation | COMPLETE | 100% |
|
||||||
|
| 2. Question Engine | COMPLETE | 100% |
|
||||||
|
| 3. Intelligence | COMPLETE | 100% |
|
||||||
|
| 4. Blueprint | COMPLETE | 100% |
|
||||||
|
| 5. Integration | COMPLETE | 100% |
|
||||||
|
| 6. Testing | COMPLETE | 100% |
|
||||||
|
|
||||||
|
**Overall**: 100% Complete
|
||||||
|
|
||||||
|
**All Tasks Done**:
|
||||||
|
- [x] All 129 tests passing
|
||||||
|
- [x] All protocol updates complete
|
||||||
|
- [x] Skill file created
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.interview import StudyInterviewEngine
|
||||||
|
|
||||||
|
# Create engine
|
||||||
|
engine = StudyInterviewEngine(study_path)
|
||||||
|
|
||||||
|
# Start interview
|
||||||
|
session = engine.start_interview("my_study", introspection=introspection_data)
|
||||||
|
|
||||||
|
# Get first question
|
||||||
|
action = engine.get_first_question()
|
||||||
|
print(action.message)
|
||||||
|
|
||||||
|
# Process answers in loop
|
||||||
|
while action.action_type == "ask_question":
|
||||||
|
user_response = input()
|
||||||
|
action = engine.process_answer(user_response)
|
||||||
|
|
||||||
|
# When complete
|
||||||
|
if action.action_type == "show_summary":
|
||||||
|
blueprint = action.blueprint
|
||||||
|
config = blueprint.to_config_json()
|
||||||
|
```
|
||||||
@@ -136,7 +136,59 @@ See `studies/M1_Mirror/README.md` for a complete parent README example.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Detailed Steps
|
## Interview Mode (DEFAULT)
|
||||||
|
|
||||||
|
**Study creation now uses Interview Mode by default.** This provides guided study creation with intelligent validation.
|
||||||
|
|
||||||
|
### Triggers (Any of These Start Interview Mode)
|
||||||
|
|
||||||
|
- "create a study", "new study", "set up study"
|
||||||
|
- "create a study for my bracket"
|
||||||
|
- "optimize this model"
|
||||||
|
- "I want to minimize mass"
|
||||||
|
- Any study creation request without "skip interview" or "manual"
|
||||||
|
|
||||||
|
### When to Skip Interview Mode (Manual)
|
||||||
|
|
||||||
|
Use manual mode only when:
|
||||||
|
- Power user who knows the exact configuration
|
||||||
|
- Recreating a known study configuration
|
||||||
|
- User explicitly says "skip interview", "quick setup", or "manual config"
|
||||||
|
|
||||||
|
### Starting Interview Mode
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.interview import StudyInterviewEngine
|
||||||
|
|
||||||
|
engine = StudyInterviewEngine(study_path)
|
||||||
|
|
||||||
|
# Run introspection first (if model available)
|
||||||
|
introspection = {
|
||||||
|
"expressions": [...], # From part introspection
|
||||||
|
"model_path": "...",
|
||||||
|
"sim_path": "..."
|
||||||
|
}
|
||||||
|
|
||||||
|
session = engine.start_interview(study_name, introspection=introspection)
|
||||||
|
action = engine.get_first_question()
|
||||||
|
|
||||||
|
# Present action.message to user
|
||||||
|
# Process answers with: action = engine.process_answer(user_response)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Interview Benefits
|
||||||
|
|
||||||
|
- **Material-aware validation**: Checks stress limits against yield
|
||||||
|
- **Anti-pattern detection**: Warns about mass minimization without constraints
|
||||||
|
- **Auto extractor mapping**: Maps goals to correct extractors (E1-E10)
|
||||||
|
- **State persistence**: Resume interrupted interviews
|
||||||
|
- **Blueprint generation**: Creates validated configuration
|
||||||
|
|
||||||
|
See `.claude/skills/modules/study-interview-mode.md` for full documentation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Detailed Steps (Manual Mode - Power Users Only)
|
||||||
|
|
||||||
### Step 1: Gather Requirements
|
### Step 1: Gather Requirements
|
||||||
|
|
||||||
|
|||||||
239
docs/protocols/operations/OP_07_DISK_OPTIMIZATION.md
Normal file
239
docs/protocols/operations/OP_07_DISK_OPTIMIZATION.md
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
# OP_07: Disk Space Optimization
|
||||||
|
|
||||||
|
**Version:** 1.0
|
||||||
|
**Last Updated:** 2025-12-29
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This protocol manages disk space for Atomizer studies through:
|
||||||
|
1. **Local cleanup** - Remove regenerable files from completed studies
|
||||||
|
2. **Remote archival** - Archive to dalidou server (14TB available)
|
||||||
|
3. **On-demand restore** - Pull archived studies when needed
|
||||||
|
|
||||||
|
## Disk Usage Analysis
|
||||||
|
|
||||||
|
### Typical Study Breakdown
|
||||||
|
|
||||||
|
| File Type | Size/Trial | Purpose | Keep? |
|
||||||
|
|-----------|------------|---------|-------|
|
||||||
|
| `.op2` | 68 MB | Nastran results | **YES** - Needed for analysis |
|
||||||
|
| `.prt` | 30 MB | NX parts | NO - Copy of master |
|
||||||
|
| `.dat` | 16 MB | Solver input | NO - Regenerable |
|
||||||
|
| `.fem` | 14 MB | FEM mesh | NO - Copy of master |
|
||||||
|
| `.sim` | 7 MB | Simulation | NO - Copy of master |
|
||||||
|
| `.afm` | 4 MB | Assembly FEM | NO - Regenerable |
|
||||||
|
| `.json` | <1 MB | Params/results | **YES** - Metadata |
|
||||||
|
| Logs | <1 MB | F04/F06/log | NO - Diagnostic only |
|
||||||
|
|
||||||
|
**Per-trial overhead:** ~150 MB total, only ~70 MB essential
|
||||||
|
|
||||||
|
### M1_Mirror Example
|
||||||
|
|
||||||
|
```
|
||||||
|
Current: 194 GB (28 studies, 2000+ trials)
|
||||||
|
After cleanup: 95 GB (51% reduction)
|
||||||
|
After archive: 5 GB (keep best_design_archive only)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
### 1. Analyze Disk Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Single study
|
||||||
|
archive_study.bat analyze studies\M1_Mirror\m1_mirror_V12
|
||||||
|
|
||||||
|
# All studies in a project
|
||||||
|
archive_study.bat analyze studies\M1_Mirror
|
||||||
|
```
|
||||||
|
|
||||||
|
Output shows:
|
||||||
|
- Total size
|
||||||
|
- Essential vs deletable breakdown
|
||||||
|
- Trial count per study
|
||||||
|
- Per-extension analysis
|
||||||
|
|
||||||
|
### 2. Cleanup Completed Study
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Dry run (default) - see what would be deleted
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12
|
||||||
|
|
||||||
|
# Actually delete
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
```
|
||||||
|
|
||||||
|
**What gets deleted:**
|
||||||
|
- `.prt`, `.fem`, `.sim`, `.afm` in trial folders
|
||||||
|
- `.dat`, `.f04`, `.f06`, `.log`, `.diag` solver files
|
||||||
|
- Temp files (`.txt`, `.exp`, `.bak`)
|
||||||
|
|
||||||
|
**What is preserved:**
|
||||||
|
- `1_setup/` folder (master model)
|
||||||
|
- `3_results/` folder (database, reports)
|
||||||
|
- All `.op2` files (Nastran results)
|
||||||
|
- All `.json` files (params, metadata)
|
||||||
|
- All `.npz` files (Zernike coefficients)
|
||||||
|
- `best_design_archive/` folder
|
||||||
|
|
||||||
|
### 3. Archive to Remote Server
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Dry run
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12
|
||||||
|
|
||||||
|
# Actually archive
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
|
||||||
|
# Use Tailscale (when not on local network)
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12 --execute --tailscale
|
||||||
|
```
|
||||||
|
|
||||||
|
**Process:**
|
||||||
|
1. Creates compressed `.tar.gz` archive
|
||||||
|
2. Uploads to `papa@192.168.86.50:/srv/storage/atomizer-archive/`
|
||||||
|
3. Deletes local archive after successful upload
|
||||||
|
|
||||||
|
### 4. List Remote Archives
|
||||||
|
|
||||||
|
```bash
|
||||||
|
archive_study.bat list
|
||||||
|
|
||||||
|
# Via Tailscale
|
||||||
|
archive_study.bat list --tailscale
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Restore from Remote
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Restore to studies/ folder
|
||||||
|
archive_study.bat restore m1_mirror_V12
|
||||||
|
|
||||||
|
# Via Tailscale
|
||||||
|
archive_study.bat restore m1_mirror_V12 --tailscale
|
||||||
|
```
|
||||||
|
|
||||||
|
## Remote Server Setup
|
||||||
|
|
||||||
|
**Server:** dalidou (Lenovo W520)
|
||||||
|
- Local IP: `192.168.86.50`
|
||||||
|
- Tailscale IP: `100.80.199.40`
|
||||||
|
- SSH user: `papa`
|
||||||
|
- Archive path: `/srv/storage/atomizer-archive/`
|
||||||
|
|
||||||
|
### First-Time Setup
|
||||||
|
|
||||||
|
SSH into dalidou and create the archive directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh papa@192.168.86.50
|
||||||
|
mkdir -p /srv/storage/atomizer-archive
|
||||||
|
```
|
||||||
|
|
||||||
|
Ensure SSH key authentication is set up for passwordless transfers:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On Windows (PowerShell)
|
||||||
|
ssh-copy-id papa@192.168.86.50
|
||||||
|
```
|
||||||
|
|
||||||
|
## Recommended Workflow
|
||||||
|
|
||||||
|
### During Active Optimization
|
||||||
|
|
||||||
|
Keep all files - you may need to re-run specific trials.
|
||||||
|
|
||||||
|
### After Study Completion
|
||||||
|
|
||||||
|
1. **Generate final report** (`STUDY_REPORT.md`)
|
||||||
|
2. **Archive best design** to `3_results/best_design_archive/`
|
||||||
|
3. **Cleanup:**
|
||||||
|
```bash
|
||||||
|
archive_study.bat cleanup studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
```
|
||||||
|
|
||||||
|
### For Long-Term Storage
|
||||||
|
|
||||||
|
1. **After cleanup**, archive to server:
|
||||||
|
```bash
|
||||||
|
archive_study.bat archive studies\M1_Mirror\m1_mirror_V12 --execute
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Optionally delete local** (keep only `3_results/best_design_archive/`)
|
||||||
|
|
||||||
|
### When Revisiting Old Study
|
||||||
|
|
||||||
|
1. **Restore:**
|
||||||
|
```bash
|
||||||
|
archive_study.bat restore m1_mirror_V12
|
||||||
|
```
|
||||||
|
|
||||||
|
2. If you need to re-run trials, the `1_setup/` master files allow regenerating everything
|
||||||
|
|
||||||
|
## Safety Features
|
||||||
|
|
||||||
|
- **Dry run by default** - Must add `--execute` to actually delete/transfer
|
||||||
|
- **Master files preserved** - `1_setup/` is never touched
|
||||||
|
- **Results preserved** - `3_results/` is never touched
|
||||||
|
- **Essential files preserved** - OP2, JSON, NPZ always kept
|
||||||
|
|
||||||
|
## Disk Space Targets
|
||||||
|
|
||||||
|
| Stage | M1_Mirror Target |
|
||||||
|
|-------|------------------|
|
||||||
|
| Active development | 200 GB (full) |
|
||||||
|
| Completed studies | 95 GB (after cleanup) |
|
||||||
|
| Archived (minimal local) | 5 GB (best only) |
|
||||||
|
| Server archive | 50 GB compressed |
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### SSH Connection Failed
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test connectivity
|
||||||
|
ping 192.168.86.50
|
||||||
|
|
||||||
|
# Test SSH
|
||||||
|
ssh papa@192.168.86.50 "echo connected"
|
||||||
|
|
||||||
|
# If on different network, use Tailscale
|
||||||
|
ssh papa@100.80.199.40 "echo connected"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Archive Upload Slow
|
||||||
|
|
||||||
|
Large studies (50+ GB) take time. The tool uses `rsync` with progress display.
|
||||||
|
For very large archives, consider running overnight or using direct LAN connection.
|
||||||
|
|
||||||
|
### Out of Disk Space During Archive
|
||||||
|
|
||||||
|
The archive is created locally first. Ensure you have ~1.5x the study size free:
|
||||||
|
- 20 GB study = ~30 GB temp space needed
|
||||||
|
|
||||||
|
## Python API
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.utils.study_archiver import (
|
||||||
|
analyze_study,
|
||||||
|
cleanup_study,
|
||||||
|
archive_to_remote,
|
||||||
|
restore_from_remote,
|
||||||
|
list_remote_archives,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Analyze
|
||||||
|
analysis = analyze_study(Path("studies/M1_Mirror/m1_mirror_V12"))
|
||||||
|
print(f"Deletable: {analysis['deletable_size']/1e9:.2f} GB")
|
||||||
|
|
||||||
|
# Cleanup (dry_run=False to actually delete)
|
||||||
|
cleanup_study(Path("studies/M1_Mirror/m1_mirror_V12"), dry_run=False)
|
||||||
|
|
||||||
|
# Archive
|
||||||
|
archive_to_remote(Path("studies/M1_Mirror/m1_mirror_V12"), dry_run=False)
|
||||||
|
|
||||||
|
# List remote
|
||||||
|
archives = list_remote_archives()
|
||||||
|
for a in archives:
|
||||||
|
print(f"{a['name']}: {a['size']}")
|
||||||
|
```
|
||||||
360
docs/protocols/system/SYS_16_SELF_AWARE_TURBO.md
Normal file
360
docs/protocols/system/SYS_16_SELF_AWARE_TURBO.md
Normal file
@@ -0,0 +1,360 @@
|
|||||||
|
# SYS_16: Self-Aware Turbo (SAT) Optimization
|
||||||
|
|
||||||
|
## Version: 3.0
|
||||||
|
## Status: VALIDATED
|
||||||
|
## Created: 2025-12-28
|
||||||
|
## Updated: 2025-12-31
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quick Summary
|
||||||
|
|
||||||
|
**SAT v3 achieved WS=205.58, beating all previous methods (V7 TPE: 218.26, V6 TPE: 225.41).**
|
||||||
|
|
||||||
|
SAT is a surrogate-accelerated optimization method that:
|
||||||
|
1. Trains an **ensemble of 5 MLPs** on historical FEA data
|
||||||
|
2. Uses **adaptive exploration** that decreases over time (15%→8%→3%)
|
||||||
|
3. Filters candidates to prevent **duplicate evaluations**
|
||||||
|
4. Applies **soft mass constraints** in the acquisition function
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Version History
|
||||||
|
|
||||||
|
| Version | Study | Training Data | Key Fix | Best WS |
|
||||||
|
|---------|-------|---------------|---------|---------|
|
||||||
|
| v1 | V7 | 129 (V6 only) | - | 218.26 |
|
||||||
|
| v2 | V8 | 196 (V6 only) | Duplicate prevention | 271.38 |
|
||||||
|
| **v3** | **V9** | **556 (V5-V8)** | **Adaptive exploration + mass targeting** | **205.58** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Problem Statement
|
||||||
|
|
||||||
|
V5 surrogate + L-BFGS failed catastrophically because:
|
||||||
|
1. MLP predicted WS=280 but actual was WS=376 (30%+ error)
|
||||||
|
2. L-BFGS descended to regions **outside training distribution**
|
||||||
|
3. Surrogate had no way to signal uncertainty
|
||||||
|
4. All L-BFGS solutions converged to the same "fake optimum"
|
||||||
|
|
||||||
|
**Root cause:** The surrogate is overconfident in regions where it has no data.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Solution: Uncertainty-Aware Surrogate with Active Learning
|
||||||
|
|
||||||
|
### Core Principles
|
||||||
|
|
||||||
|
1. **Never trust a point prediction** - Always require uncertainty bounds
|
||||||
|
2. **High uncertainty = run FEA** - Don't optimize where you don't know
|
||||||
|
3. **Actively fill gaps** - Prioritize FEA in high-uncertainty regions
|
||||||
|
4. **Validate gradient solutions** - Check L-BFGS results against FEA before trusting
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### 1. Ensemble Surrogate (Epistemic Uncertainty)
|
||||||
|
|
||||||
|
Instead of one MLP, train **N independent models** with different initializations:
|
||||||
|
|
||||||
|
```python
|
||||||
|
class EnsembleSurrogate:
|
||||||
|
def __init__(self, n_models=5):
|
||||||
|
self.models = [MLP() for _ in range(n_models)]
|
||||||
|
|
||||||
|
def predict(self, x):
|
||||||
|
preds = [m.predict(x) for m in self.models]
|
||||||
|
mean = np.mean(preds, axis=0)
|
||||||
|
std = np.std(preds, axis=0) # Epistemic uncertainty
|
||||||
|
return mean, std
|
||||||
|
|
||||||
|
def is_confident(self, x, threshold=0.1):
|
||||||
|
mean, std = self.predict(x)
|
||||||
|
# Confident if std < 10% of mean
|
||||||
|
return (std / (mean + 1e-6)) < threshold
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this works:** Models trained on different random seeds will agree in well-sampled regions but disagree wildly in extrapolation regions.
|
||||||
|
|
||||||
|
### 2. Distance-Based OOD Detection
|
||||||
|
|
||||||
|
Track training data distribution and flag points that are "too far":
|
||||||
|
|
||||||
|
```python
|
||||||
|
class OODDetector:
|
||||||
|
def __init__(self, X_train):
|
||||||
|
self.X_train = X_train
|
||||||
|
self.mean = X_train.mean(axis=0)
|
||||||
|
self.std = X_train.std(axis=0)
|
||||||
|
# Fit KNN for local density
|
||||||
|
self.knn = NearestNeighbors(n_neighbors=5)
|
||||||
|
self.knn.fit(X_train)
|
||||||
|
|
||||||
|
def distance_to_training(self, x):
|
||||||
|
"""Return distance to nearest training points."""
|
||||||
|
distances, _ = self.knn.kneighbors(x.reshape(1, -1))
|
||||||
|
return distances.mean()
|
||||||
|
|
||||||
|
def is_in_distribution(self, x, threshold=2.0):
|
||||||
|
"""Check if point is within 2 std of training data."""
|
||||||
|
z_scores = np.abs((x - self.mean) / (self.std + 1e-6))
|
||||||
|
return z_scores.max() < threshold
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Trust-Region L-BFGS
|
||||||
|
|
||||||
|
Constrain L-BFGS to stay within training distribution:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def trust_region_lbfgs(surrogate, ood_detector, x0, max_iter=100):
|
||||||
|
"""L-BFGS that respects training data boundaries."""
|
||||||
|
|
||||||
|
def constrained_objective(x):
|
||||||
|
# If OOD, return large penalty
|
||||||
|
if not ood_detector.is_in_distribution(x):
|
||||||
|
return 1e9
|
||||||
|
|
||||||
|
mean, std = surrogate.predict(x)
|
||||||
|
# If uncertain, return upper confidence bound (pessimistic)
|
||||||
|
if std > 0.1 * mean:
|
||||||
|
return mean + 2 * std # Be conservative
|
||||||
|
|
||||||
|
return mean
|
||||||
|
|
||||||
|
result = minimize(constrained_objective, x0, method='L-BFGS-B')
|
||||||
|
return result.x
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Acquisition Function with Uncertainty
|
||||||
|
|
||||||
|
Use **Expected Improvement with Uncertainty** (like Bayesian Optimization):
|
||||||
|
|
||||||
|
```python
|
||||||
|
def acquisition_score(x, surrogate, best_so_far):
|
||||||
|
"""Score = potential improvement weighted by confidence."""
|
||||||
|
mean, std = surrogate.predict(x)
|
||||||
|
|
||||||
|
    # Expected improvement: WS is minimized, so a lower predicted mean
    # yields a larger (better) improvement over best_so_far
|
||||||
|
improvement = best_so_far - mean
|
||||||
|
|
||||||
|
# Exploration bonus for uncertain regions
|
||||||
|
exploration = 0.5 * std
|
||||||
|
|
||||||
|
# High score = worth evaluating with FEA
|
||||||
|
return improvement + exploration
|
||||||
|
|
||||||
|
def select_next_fea_candidates(surrogate, candidates, best_so_far, n=5):
|
||||||
|
"""Select candidates balancing exploitation and exploration."""
|
||||||
|
scores = [acquisition_score(c, surrogate, best_so_far) for c in candidates]
|
||||||
|
|
||||||
|
# Pick top candidates by acquisition score
|
||||||
|
top_indices = np.argsort(scores)[-n:]
|
||||||
|
return [candidates[i] for i in top_indices]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Algorithm: Self-Aware Turbo (SAT)
|
||||||
|
|
||||||
|
```
|
||||||
|
INITIALIZE:
|
||||||
|
- Load existing FEA data (X_train, Y_train)
|
||||||
|
- Train ensemble surrogate on data
|
||||||
|
- Fit OOD detector on X_train
|
||||||
|
- Set best_ws = min(Y_train)
|
||||||
|
|
||||||
|
PHASE 1: UNCERTAINTY MAPPING (10% of budget)
|
||||||
|
FOR i in 1..N_mapping:
|
||||||
|
- Sample random point x
|
||||||
|
- Get uncertainty: mean, std = surrogate.predict(x)
|
||||||
|
- If std > threshold: run FEA, add to training data
|
||||||
|
- Retrain ensemble periodically
|
||||||
|
|
||||||
|
This fills in the "holes" in the surrogate's knowledge.
|
||||||
|
|
||||||
|
PHASE 2: EXPLOITATION WITH VALIDATION (80% of budget)
|
||||||
|
FOR i in 1..N_exploit:
|
||||||
|
- Generate 1000 TPE samples
|
||||||
|
- Filter to keep only confident predictions (std < 10% of mean)
|
||||||
|
- Filter to keep only in-distribution (OOD check)
|
||||||
|
- Rank by predicted WS
|
||||||
|
|
||||||
|
- Take top 5 candidates
|
||||||
|
- Run FEA on all 5
|
||||||
|
|
||||||
|
- For each FEA result:
|
||||||
|
- Compare predicted vs actual
|
||||||
|
- If error > 20%: mark region as "unreliable", force exploration there
|
||||||
|
- If error < 10%: update best, retrain surrogate
|
||||||
|
|
||||||
|
- Every 10 iterations: retrain ensemble with new data
|
||||||
|
|
||||||
|
PHASE 3: L-BFGS REFINEMENT (10% of budget)
|
||||||
|
- Only run L-BFGS if ensemble R² > 0.95 on validation set
|
||||||
|
- Use trust-region L-BFGS (stay within training distribution)
|
||||||
|
|
||||||
|
FOR each L-BFGS solution:
|
||||||
|
- Check ensemble disagreement
|
||||||
|
- If models agree (std < 5%): run FEA to validate
|
||||||
|
- If models disagree: skip, too uncertain
|
||||||
|
|
||||||
|
- Compare L-BFGS prediction vs FEA
|
||||||
|
- If error > 15%: ABORT L-BFGS phase, return to Phase 2
|
||||||
|
- If error < 10%: accept as candidate
|
||||||
|
|
||||||
|
FINAL:
|
||||||
|
- Return best FEA-validated design
|
||||||
|
- Report uncertainty bounds for all objectives
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Key Differences from V5
|
||||||
|
|
||||||
|
| Aspect | V5 (Failed) | SAT (Proposed) |
|
||||||
|
|--------|-------------|----------------|
|
||||||
|
| **Model** | Single MLP | Ensemble of 5 MLPs |
|
||||||
|
| **Uncertainty** | None | Ensemble disagreement + OOD detection |
|
||||||
|
| **L-BFGS** | Trust blindly | Trust-region, validate every step |
|
||||||
|
| **Extrapolation** | Accept | Reject or penalize |
|
||||||
|
| **Active learning** | No | Yes - prioritize uncertain regions |
|
||||||
|
| **Validation** | After L-BFGS | Throughout |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Implementation Checklist
|
||||||
|
|
||||||
|
1. [ ] `EnsembleSurrogate` class with N=5 MLPs
|
||||||
|
2. [ ] `OODDetector` with KNN + z-score checks
|
||||||
|
3. [ ] `acquisition_score()` balancing exploitation/exploration
|
||||||
|
4. [ ] Trust-region L-BFGS with OOD penalties
|
||||||
|
5. [ ] Automatic retraining when new FEA data arrives
|
||||||
|
6. [ ] Logging of prediction errors to track surrogate quality
|
||||||
|
7. [ ] Early abort if L-BFGS predictions consistently wrong
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Expected Behavior
|
||||||
|
|
||||||
|
**In well-sampled regions:**
|
||||||
|
- Ensemble agrees → Low uncertainty → Trust predictions
|
||||||
|
- L-BFGS finds valid optima → FEA confirms → Success
|
||||||
|
|
||||||
|
**In poorly-sampled regions:**
|
||||||
|
- Ensemble disagrees → High uncertainty → Run FEA instead
|
||||||
|
- L-BFGS penalized → Stays in trusted zone → No fake optima
|
||||||
|
|
||||||
|
**At distribution boundaries:**
|
||||||
|
- OOD detector flags → Reject predictions
|
||||||
|
- Acquisition prioritizes → Active learning fills gaps
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Metrics to Track
|
||||||
|
|
||||||
|
1. **Surrogate R² on validation set** - Target > 0.95 before L-BFGS
|
||||||
|
2. **Prediction error histogram** - Should be centered at 0
|
||||||
|
3. **OOD rejection rate** - How often we refuse to predict
|
||||||
|
4. **Ensemble disagreement** - Average std across predictions
|
||||||
|
5. **L-BFGS success rate** - % of L-BFGS solutions that validate
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## When to Use SAT vs Pure TPE
|
||||||
|
|
||||||
|
| Scenario | Recommendation |
|
||||||
|
|----------|----------------|
|
||||||
|
| < 100 existing samples | Pure TPE (not enough for good surrogate) |
|
||||||
|
| 100-500 samples | SAT Phase 1-2 only (no L-BFGS) |
|
||||||
|
| > 500 samples | Full SAT with L-BFGS refinement |
|
||||||
|
| High-dimensional (>20 params) | Pure TPE (curse of dimensionality) |
|
||||||
|
| Noisy FEA | Pure TPE (surrogates struggle with noise) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## SAT v3 Implementation Details
|
||||||
|
|
||||||
|
### Adaptive Exploration Schedule
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_exploration_weight(trial_num):
|
||||||
|
if trial_num <= 30: return 0.15 # Phase 1: 15% exploration
|
||||||
|
elif trial_num <= 80: return 0.08 # Phase 2: 8% exploration
|
||||||
|
    else: return 0.03                 # Phase 3: 3% exploration (exploitation-dominant)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Acquisition Function (v3)
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Normalize components
|
||||||
|
norm_ws = (pred_ws - pred_ws.min()) / (pred_ws.max() - pred_ws.min())  # NOTE: assumes pred_ws is non-constant (guard against zero range in practice)
|
||||||
|
norm_dist = distances / distances.max()
|
||||||
|
mass_penalty = max(0, pred_mass - 118.0) * 5.0 # Soft threshold at 118 kg
|
||||||
|
|
||||||
|
# Adaptive acquisition (lower = better)
|
||||||
|
acquisition = norm_ws - exploration_weight * norm_dist + mass_penalty
|
||||||
|
```
|
||||||
|
|
||||||
|
### Candidate Generation (v3)
|
||||||
|
|
||||||
|
```python
|
||||||
|
for _ in range(1000):
|
||||||
|
if random() < 0.7 and best_x is not None:
|
||||||
|
# 70% exploitation: sample near best
|
||||||
|
scale = uniform(0.05, 0.15)
|
||||||
|
candidate = sample_near_point(best_x, scale)
|
||||||
|
else:
|
||||||
|
# 30% exploration: random sampling
|
||||||
|
candidate = sample_random()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Key Configuration (v3)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"n_ensemble_models": 5,
|
||||||
|
"training_epochs": 800,
|
||||||
|
"candidates_per_round": 1000,
|
||||||
|
"min_distance_threshold": 0.03,
|
||||||
|
"mass_soft_threshold": 118.0,
|
||||||
|
"exploit_near_best_ratio": 0.7,
|
||||||
|
"lbfgs_polish_trials": 10
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## V9 Results
|
||||||
|
|
||||||
|
| Phase | Trials | Best WS | Mean WS |
|
||||||
|
|-------|--------|---------|---------|
|
||||||
|
| Phase 1 (explore) | 30 | 232.00 | 394.48 |
|
||||||
|
| Phase 2 (balanced) | 50 | 222.01 | 360.51 |
|
||||||
|
| Phase 3 (exploit) | 57+ | **205.58** | 262.57 |
|
||||||
|
|
||||||
|
**Key metrics:**
|
||||||
|
- 100% feasibility rate
|
||||||
|
- 100% unique designs (no duplicates)
|
||||||
|
- Surrogate R² = 0.99
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- Gaussian Process literature on uncertainty quantification
|
||||||
|
- Deep Ensembles: Lakshminarayanan et al. (2017)
|
||||||
|
- Bayesian Optimization with Expected Improvement
|
||||||
|
- Trust-region methods for constrained optimization
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Implementation
|
||||||
|
|
||||||
|
- **V9 Study:** `studies/M1_Mirror/m1_mirror_cost_reduction_flat_back_V9/`
|
||||||
|
- **Script:** `run_sat_optimization.py`
|
||||||
|
- **Ensemble:** `optimization_engine/surrogates/ensemble_surrogate.py`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*The key insight: A surrogate that knows when it doesn't know is infinitely more valuable than one that's confidently wrong.*
|
||||||
307
docs/protocols/system/SYS_17_CONTEXT_ENGINEERING.md
Normal file
307
docs/protocols/system/SYS_17_CONTEXT_ENGINEERING.md
Normal file
@@ -0,0 +1,307 @@
|
|||||||
|
---
|
||||||
|
protocol_id: SYS_17
|
||||||
|
version: 1.0
|
||||||
|
last_updated: 2025-12-29
|
||||||
|
status: active
|
||||||
|
owner: system
|
||||||
|
code_dependencies:
|
||||||
|
- optimization_engine.context.*
|
||||||
|
requires_protocols: []
|
||||||
|
---
|
||||||
|
|
||||||
|
# SYS_17: Context Engineering System
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Context Engineering System implements the **Agentic Context Engineering (ACE)** framework, enabling Atomizer to learn from every optimization run and accumulate institutional knowledge over time.
|
||||||
|
|
||||||
|
## When to Load This Protocol
|
||||||
|
|
||||||
|
Load SYS_17 when:
|
||||||
|
- User asks about "learning", "playbook", or "context engineering"
|
||||||
|
- Debugging why certain knowledge isn't being applied
|
||||||
|
- Configuring context behavior
|
||||||
|
- Analyzing what the system has learned
|
||||||
|
|
||||||
|
## Core Concepts
|
||||||
|
|
||||||
|
### The ACE Framework
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
|
||||||
|
│ Generator │────▶│ Reflector │────▶│ Curator │
|
||||||
|
│ (Opt Runs) │ │ (Analysis) │ │ (Playbook) │
|
||||||
|
└─────────────┘ └─────────────┘ └─────────────┘
|
||||||
|
│ │
|
||||||
|
└───────────── Feedback ───────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
1. **Generator**: OptimizationRunner produces trial outcomes
|
||||||
|
2. **Reflector**: Analyzes outcomes, extracts patterns
|
||||||
|
3. **Curator**: Playbook stores and manages insights
|
||||||
|
4. **Feedback**: Success/failure updates insight scores
|
||||||
|
|
||||||
|
### Playbook Item Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
[str-00001] helpful=8 harmful=0 :: "Use shell elements for thin walls"
|
||||||
|
│ │ │ │
|
||||||
|
│ │ │ └── Insight content
|
||||||
|
│ │ └── Times advice led to failure
|
||||||
|
│ └── Times advice led to success
|
||||||
|
└── Unique ID (category-number)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Categories
|
||||||
|
|
||||||
|
| Code | Name | Description | Example |
|
||||||
|
|------|------|-------------|---------|
|
||||||
|
| `str` | STRATEGY | Optimization approaches | "Start with TPE, switch to CMA-ES" |
|
||||||
|
| `mis` | MISTAKE | Things to avoid | "Don't use coarse mesh for stress" |
|
||||||
|
| `tool` | TOOL | Tool usage tips | "Use GP sampler for few-shot" |
|
||||||
|
| `cal` | CALCULATION | Formulas | "Safety factor = yield/max_stress" |
|
||||||
|
| `dom` | DOMAIN | Domain knowledge | "Zernike coefficients for mirrors" |
|
||||||
|
| `wf` | WORKFLOW | Workflow patterns | "Load _i.prt before UpdateFemodel()" |
|
||||||
|
|
||||||
|
## Key Components
|
||||||
|
|
||||||
|
### 1. AtomizerPlaybook
|
||||||
|
|
||||||
|
Location: `optimization_engine/context/playbook.py`
|
||||||
|
|
||||||
|
The central knowledge store. Handles:
|
||||||
|
- Adding insights (with auto-deduplication)
|
||||||
|
- Recording helpful/harmful outcomes
|
||||||
|
- Generating filtered context for LLM
|
||||||
|
- Pruning consistently harmful items
|
||||||
|
- Persistence (JSON)
|
||||||
|
|
||||||
|
**Quick Usage:**
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import get_playbook, save_playbook, InsightCategory
|
||||||
|
|
||||||
|
playbook = get_playbook()
|
||||||
|
playbook.add_insight(InsightCategory.STRATEGY, "Use shell elements for thin walls")
|
||||||
|
playbook.record_outcome("str-00001", helpful=True)
|
||||||
|
save_playbook()
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. AtomizerReflector
|
||||||
|
|
||||||
|
Location: `optimization_engine/context/reflector.py`
|
||||||
|
|
||||||
|
Analyzes optimization outcomes to extract insights:
|
||||||
|
- Classifies errors (convergence, mesh, singularity, etc.)
|
||||||
|
- Extracts success patterns
|
||||||
|
- Generates study-level insights
|
||||||
|
|
||||||
|
**Quick Usage:**
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import AtomizerReflector, OptimizationOutcome
|
||||||
|
|
||||||
|
reflector = AtomizerReflector(playbook)
|
||||||
|
outcome = OptimizationOutcome(trial_number=42, success=True, ...)
|
||||||
|
insights = reflector.analyze_trial(outcome)
|
||||||
|
reflector.commit_insights()
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. FeedbackLoop
|
||||||
|
|
||||||
|
Location: `optimization_engine/context/feedback_loop.py`
|
||||||
|
|
||||||
|
Automated learning loop that:
|
||||||
|
- Processes trial results
|
||||||
|
- Updates playbook scores based on outcomes
|
||||||
|
- Tracks which items were active per trial
|
||||||
|
- Finalizes learning at study end
|
||||||
|
|
||||||
|
**Quick Usage:**
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import FeedbackLoop
|
||||||
|
|
||||||
|
feedback = FeedbackLoop(playbook_path)
|
||||||
|
feedback.process_trial_result(trial_number=42, success=True, ...)
|
||||||
|
feedback.finalize_study({"name": "study", "total_trials": 100, ...})
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. SessionState
|
||||||
|
|
||||||
|
Location: `optimization_engine/context/session_state.py`
|
||||||
|
|
||||||
|
Manages context isolation:
|
||||||
|
- **Exposed**: Always in LLM context (task type, recent actions, errors)
|
||||||
|
- **Isolated**: On-demand access (full history, NX paths, F06 content)
|
||||||
|
|
||||||
|
**Quick Usage:**
|
||||||
|
```python
|
||||||
|
from optimization_engine.context import get_session, TaskType
|
||||||
|
|
||||||
|
session = get_session()
|
||||||
|
session.exposed.task_type = TaskType.RUN_OPTIMIZATION
|
||||||
|
session.add_action("Started trial 42")
|
||||||
|
context = session.get_llm_context()
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. CompactionManager
|
||||||
|
|
||||||
|
Location: `optimization_engine/context/compaction.py`
|
||||||
|
|
||||||
|
Handles long sessions:
|
||||||
|
- Triggers compaction at threshold (default 50 events)
|
||||||
|
- Summarizes old events into statistics
|
||||||
|
- Preserves errors and milestones
|
||||||
|
|
||||||
|
### 6. CacheOptimizer
|
||||||
|
|
||||||
|
Location: `optimization_engine/context/cache_monitor.py`
|
||||||
|
|
||||||
|
Optimizes for KV-cache:
|
||||||
|
- Three-tier context structure (stable/semi-stable/dynamic)
|
||||||
|
- Tracks cache hit rate
|
||||||
|
- Estimates cost savings
|
||||||
|
|
||||||
|
## Integration with OptimizationRunner
|
||||||
|
|
||||||
|
### Option 1: Mixin
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.context.runner_integration import ContextEngineeringMixin
|
||||||
|
|
||||||
|
class MyRunner(ContextEngineeringMixin, OptimizationRunner):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
self.init_context_engineering()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Option 2: Wrapper
|
||||||
|
|
||||||
|
```python
|
||||||
|
from optimization_engine.context.runner_integration import ContextAwareRunner
|
||||||
|
|
||||||
|
runner = OptimizationRunner(config_path=...)
|
||||||
|
context_runner = ContextAwareRunner(runner)
|
||||||
|
context_runner.run(n_trials=100)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Dashboard API
|
||||||
|
|
||||||
|
Base URL: `/api/context`
|
||||||
|
|
||||||
|
| Endpoint | Method | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `/playbook` | GET | Playbook summary |
|
||||||
|
| `/playbook/items` | GET | List items (with filters) |
|
||||||
|
| `/playbook/items/{id}` | GET | Get specific item |
|
||||||
|
| `/playbook/feedback` | POST | Record helpful/harmful |
|
||||||
|
| `/playbook/insights` | POST | Add new insight |
|
||||||
|
| `/playbook/prune` | POST | Prune harmful items |
|
||||||
|
| `/playbook/context` | GET | Get LLM context string |
|
||||||
|
| `/session` | GET | Session state |
|
||||||
|
| `/learning/report` | GET | Learning report |
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### 1. Record Immediately
|
||||||
|
|
||||||
|
Don't wait until session end:
|
||||||
|
```python
|
||||||
|
# RIGHT: Record immediately
|
||||||
|
playbook.add_insight(InsightCategory.MISTAKE, "Convergence failed with X")
|
||||||
|
playbook.save(path)
|
||||||
|
|
||||||
|
# WRONG: Wait until end
|
||||||
|
# (User might close session, learning lost)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Be Specific
|
||||||
|
|
||||||
|
```python
|
||||||
|
# GOOD: Specific and actionable
|
||||||
|
"For bracket optimization with >5 variables, TPE outperforms random search"
|
||||||
|
|
||||||
|
# BAD: Vague
|
||||||
|
"TPE is good"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Include Context
|
||||||
|
|
||||||
|
```python
|
||||||
|
playbook.add_insight(
|
||||||
|
InsightCategory.STRATEGY,
|
||||||
|
"Shell elements reduce solve time by 40% for thickness < 2mm",
|
||||||
|
tags=["mesh", "shell", "performance"]
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Review Harmful Items
|
||||||
|
|
||||||
|
Periodically check items with negative scores:
|
||||||
|
```python
|
||||||
|
harmful = [i for i in playbook.items.values() if i.net_score < 0]
|
||||||
|
for item in harmful:
|
||||||
|
print(f"{item.id}: {item.content[:50]}... (score={item.net_score})")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Playbook Not Updating
|
||||||
|
|
||||||
|
1. Check playbook path:
|
||||||
|
```python
|
||||||
|
print(playbook_path) # Should be knowledge_base/playbook.json
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Verify save is called:
|
||||||
|
```python
|
||||||
|
playbook.save(path) # Must be explicit
|
||||||
|
```
|
||||||
|
|
||||||
|
### Insights Not Appearing in Context
|
||||||
|
|
||||||
|
1. Check confidence threshold:
|
||||||
|
```python
|
||||||
|
# Default is 0.5 - new items start at 0.5
|
||||||
|
context = playbook.get_context_for_task("opt", min_confidence=0.3)
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Check if items exist:
|
||||||
|
```python
|
||||||
|
print(f"Total items: {len(playbook.items)}")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Learning Not Working
|
||||||
|
|
||||||
|
1. Verify FeedbackLoop is finalized:
|
||||||
|
```python
|
||||||
|
feedback.finalize_study(...) # MUST be called
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Check context_items_used parameter:
|
||||||
|
```python
|
||||||
|
# Items must be explicitly tracked
|
||||||
|
feedback.process_trial_result(
|
||||||
|
...,
|
||||||
|
context_items_used=list(playbook.items.keys())[:10]
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Files Reference
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `optimization_engine/context/__init__.py` | Module exports |
|
||||||
|
| `optimization_engine/context/playbook.py` | Knowledge store |
|
||||||
|
| `optimization_engine/context/reflector.py` | Outcome analysis |
|
||||||
|
| `optimization_engine/context/session_state.py` | Context isolation |
|
||||||
|
| `optimization_engine/context/feedback_loop.py` | Learning loop |
|
||||||
|
| `optimization_engine/context/compaction.py` | Long session management |
|
||||||
|
| `optimization_engine/context/cache_monitor.py` | KV-cache optimization |
|
||||||
|
| `optimization_engine/context/runner_integration.py` | Runner integration |
|
||||||
|
| `knowledge_base/playbook.json` | Persistent storage |
|
||||||
|
|
||||||
|
## See Also
|
||||||
|
|
||||||
|
- `docs/CONTEXT_ENGINEERING_REPORT.md` - Full implementation report
|
||||||
|
- `.claude/skills/00_BOOTSTRAP_V2.md` - Enhanced bootstrap
|
||||||
|
- `tests/test_context_engineering.py` - Unit tests
|
||||||
|
- `tests/test_context_integration.py` - Integration tests
|
||||||
@@ -26,7 +26,7 @@ if sys.platform == 'win32':
|
|||||||
project_root = Path(__file__).parent.parent
|
project_root = Path(__file__).parent.parent
|
||||||
sys.path.insert(0, str(project_root))
|
sys.path.insert(0, str(project_root))
|
||||||
|
|
||||||
from optimization_engine.research_agent import (
|
from optimization_engine.future.research_agent import (
|
||||||
ResearchAgent,
|
ResearchAgent,
|
||||||
ResearchFindings,
|
ResearchFindings,
|
||||||
KnowledgeGap,
|
KnowledgeGap,
|
||||||
|
|||||||
@@ -3,3 +3,7 @@
|
|||||||
{"timestamp":"2025-12-19T10:00:00","category":"workaround","context":"NX journal execution via cmd /c with environment variables fails silently or produces garbled output. Multiple attempts with cmd /c SET and && chaining failed to capture run_journal.exe output.","insight":"CRITICAL WORKAROUND: When executing NX journals from Claude Code on Windows, use PowerShell with [Environment]::SetEnvironmentVariable() method instead of cmd /c or $env: syntax. The correct pattern is: powershell -Command \"[Environment]::SetEnvironmentVariable('SPLM_LICENSE_SERVER', '28000@dalidou;28000@100.80.199.40', 'Process'); & 'C:\\Program Files\\Siemens\\DesigncenterNX2512\\NXBIN\\run_journal.exe' 'journal.py' -args 'arg1' 'arg2' 2>&1\". The $env: syntax gets corrupted when passed through bash (colon gets interpreted). The cmd /c SET syntax often fails to capture output. This PowerShell pattern reliably sets license server and captures all output.","confidence":1.0,"tags":["nx","powershell","run_journal","license-server","windows","cmd-workaround"],"severity":"high","rule":"ALWAYS use PowerShell with [Environment]::SetEnvironmentVariable() for NX journal execution. NEVER use cmd /c SET or $env: syntax for setting SPLM_LICENSE_SERVER."}
|
{"timestamp":"2025-12-19T10:00:00","category":"workaround","context":"NX journal execution via cmd /c with environment variables fails silently or produces garbled output. Multiple attempts with cmd /c SET and && chaining failed to capture run_journal.exe output.","insight":"CRITICAL WORKAROUND: When executing NX journals from Claude Code on Windows, use PowerShell with [Environment]::SetEnvironmentVariable() method instead of cmd /c or $env: syntax. The correct pattern is: powershell -Command \"[Environment]::SetEnvironmentVariable('SPLM_LICENSE_SERVER', '28000@dalidou;28000@100.80.199.40', 'Process'); & 'C:\\Program Files\\Siemens\\DesigncenterNX2512\\NXBIN\\run_journal.exe' 'journal.py' -args 'arg1' 'arg2' 2>&1\". The $env: syntax gets corrupted when passed through bash (colon gets interpreted). The cmd /c SET syntax often fails to capture output. This PowerShell pattern reliably sets license server and captures all output.","confidence":1.0,"tags":["nx","powershell","run_journal","license-server","windows","cmd-workaround"],"severity":"high","rule":"ALWAYS use PowerShell with [Environment]::SetEnvironmentVariable() for NX journal execution. NEVER use cmd /c SET or $env: syntax for setting SPLM_LICENSE_SERVER."}
|
||||||
{"timestamp":"2025-12-19T15:30:00","category":"failure","context":"CMA-ES optimization V7 started with random sample instead of baseline. First trial had whiffle_min=45.73 instead of baseline 62.75, resulting in WS=329 instead of expected ~281.","insight":"CMA-ES with Optuna CmaEsSampler does NOT evaluate x0 (baseline) first - it samples AROUND x0 with sigma0 step size. The x0 parameter only sets the CENTER of the initial sampling distribution, not the first trial. To ensure baseline is evaluated first, use study.enqueue_trial(x0) after creating the study. This is critical for refinement studies where you need to compare against a known-good baseline. Pattern: if len(study.trials) == 0: study.enqueue_trial(x0)","confidence":1.0,"tags":["cma-es","optuna","baseline","x0","enqueue","optimization"],"severity":"high","rule":"When using CmaEsSampler with a known baseline, ALWAYS enqueue the baseline as trial 0 using study.enqueue_trial(x0). The x0 parameter alone does NOT guarantee baseline evaluation."}
|
{"timestamp":"2025-12-19T15:30:00","category":"failure","context":"CMA-ES optimization V7 started with random sample instead of baseline. First trial had whiffle_min=45.73 instead of baseline 62.75, resulting in WS=329 instead of expected ~281.","insight":"CMA-ES with Optuna CmaEsSampler does NOT evaluate x0 (baseline) first - it samples AROUND x0 with sigma0 step size. The x0 parameter only sets the CENTER of the initial sampling distribution, not the first trial. To ensure baseline is evaluated first, use study.enqueue_trial(x0) after creating the study. This is critical for refinement studies where you need to compare against a known-good baseline. Pattern: if len(study.trials) == 0: study.enqueue_trial(x0)","confidence":1.0,"tags":["cma-es","optuna","baseline","x0","enqueue","optimization"],"severity":"high","rule":"When using CmaEsSampler with a known baseline, ALWAYS enqueue the baseline as trial 0 using study.enqueue_trial(x0). The x0 parameter alone does NOT guarantee baseline evaluation."}
|
||||||
{"timestamp":"2025-12-22T14:00:00","category":"failure","context":"V10 mirror optimization reported impossibly good relative WFE values (40-20=1.99nm instead of ~6nm, 60-20=6.82nm instead of ~13nm). User noticed results were 'too good to be true'.","insight":"CRITICAL BUG IN RELATIVE WFE CALCULATION: The V10 run_optimization.py computed relative WFE as abs(RMS_target - RMS_ref) instead of RMS(WFE_target - WFE_ref). This is mathematically WRONG because |RMS(A) - RMS(B)| ≠ RMS(A - B). The correct approach is to compute the node-by-node WFE difference FIRST, then fit Zernike to the difference field, then compute RMS. The bug gave values 3-4x lower than correct values because the 20° reference had HIGHER absolute WFE than 40°/60°, so the subtraction gave negative values, and abs() hid the problem. The fix is to use extractor.extract_relative() which correctly computes node-by-node differences. Both ZernikeExtractor and ZernikeOPDExtractor now have extract_relative() methods.","confidence":1.0,"tags":["zernike","wfe","relative-wfe","extract_relative","critical-bug","v10"],"severity":"critical","rule":"NEVER compute relative WFE as abs(RMS_target - RMS_ref). ALWAYS use extract_relative() which computes RMS(WFE_target - WFE_ref) by doing node-by-node subtraction first, then Zernike fitting, then RMS."}
|
{"timestamp":"2025-12-22T14:00:00","category":"failure","context":"V10 mirror optimization reported impossibly good relative WFE values (40-20=1.99nm instead of ~6nm, 60-20=6.82nm instead of ~13nm). User noticed results were 'too good to be true'.","insight":"CRITICAL BUG IN RELATIVE WFE CALCULATION: The V10 run_optimization.py computed relative WFE as abs(RMS_target - RMS_ref) instead of RMS(WFE_target - WFE_ref). This is mathematically WRONG because |RMS(A) - RMS(B)| ≠ RMS(A - B). The correct approach is to compute the node-by-node WFE difference FIRST, then fit Zernike to the difference field, then compute RMS. The bug gave values 3-4x lower than correct values because the 20° reference had HIGHER absolute WFE than 40°/60°, so the subtraction gave negative values, and abs() hid the problem. The fix is to use extractor.extract_relative() which correctly computes node-by-node differences. Both ZernikeExtractor and ZernikeOPDExtractor now have extract_relative() methods.","confidence":1.0,"tags":["zernike","wfe","relative-wfe","extract_relative","critical-bug","v10"],"severity":"critical","rule":"NEVER compute relative WFE as abs(RMS_target - RMS_ref). ALWAYS use extract_relative() which computes RMS(WFE_target - WFE_ref) by doing node-by-node subtraction first, then Zernike fitting, then RMS."}
|
||||||
|
{"timestamp":"2025-12-28T17:30:00","category":"failure","context":"V5 turbo optimization created from scratch instead of copying V4. Multiple critical components were missing or wrong: no license server, wrong extraction keys (filtered_rms_nm vs relative_filtered_rms_nm), wrong mfg_90 key, missing figure_path parameter, incomplete version regex.","insight":"STUDY DERIVATION FAILURE: When creating a new study version (V5 from V4), NEVER rewrite the run_optimization.py from scratch. ALWAYS copy the working version first, then add/modify only the new feature (e.g., L-BFGS polish). Rewriting caused 5 independent bugs: (1) missing LICENSE_SERVER setup, (2) wrong extraction key filtered_rms_nm instead of relative_filtered_rms_nm, (3) wrong mfg_90 key, (4) missing figure_path=None in extractor call, (5) incomplete version regex missing DesigncenterNX pattern. The FEA/extraction pipeline is PROVEN CODE - never rewrite it. Only add new optimization strategies as modules on top.","confidence":1.0,"tags":["study-creation","copy-dont-rewrite","extraction","license-server","v5","critical"],"severity":"critical","rule":"When deriving a new study version, COPY the entire working run_optimization.py first. Add new features as ADDITIONS, not rewrites. The FEA pipeline (license, NXSolver setup, extraction) is proven - never rewrite it."}
|
||||||
|
{"timestamp":"2025-12-28T21:30:00","category":"failure","context":"V5 flat back turbo optimization with MLP surrogate + L-BFGS polish. Surrogate predicted WS~280 but actual FEA gave WS~365-377. Error of 85-96 (30%+ relative error). All L-BFGS solutions converged to same fake optimum that didn't exist in reality.","insight":"SURROGATE + L-BFGS FAILURE MODE: Gradient-based optimization on MLP surrogates finds 'fake optima' that don't exist in real FEA. The surrogate has smooth gradients everywhere, but L-BFGS descends to regions OUTSIDE the training distribution where predictions are wildly wrong. V5 results: (1) Best TPE trial: WS=290.18, (2) Best L-BFGS trial: WS=325.27, (3) Worst L-BFGS trials: WS=376.52. The fancy L-BFGS polish made results WORSE than random TPE. Key issues: (a) No uncertainty quantification - can't detect out-of-distribution, (b) No mass constraint in surrogate - L-BFGS finds infeasible designs (122-124kg vs 120kg limit), (c) L-BFGS converges to same bad point from multiple starting locations (trials 31-44 all gave WS=376.52).","confidence":1.0,"tags":["surrogate","mlp","lbfgs","gradient-descent","fake-optima","out-of-distribution","v5","turbo"],"severity":"critical","rule":"NEVER trust gradient descent on surrogates without: (1) Uncertainty quantification to reject OOD predictions, (2) Mass/constraint prediction to enforce feasibility, (3) Trust-region to stay within training distribution. Pure TPE with real FEA often beats surrogate+gradient methods."}
|
||||||
|
{"timestamp": "2025-12-29T15:29:55.869508", "category": "failure", "context": "Trial 5 solver error", "insight": "convergence_failure: Convergence failure at iteration 100", "confidence": 0.7, "tags": ["solver", "convergence_failure", "automatic"]}
|
||||||
|
{"timestamp": "2026-01-01T21:06:37.877252", "category": "failure", "context": "V13 optimization had 45 FEA failures (34% failure rate)", "insight": "rib_thickness parameter has CAD geometry constraint at ~9mm. All trials with rib_thickness > 9.0 failed. Set max to 9.0 (was 12.0). This is a critical CAD constraint not documented anywhere - the NX model geometry breaks with thicker radial ribs.", "confidence": 0.95, "tags": ["m1_mirror", "cad_constraint", "rib_thickness", "V13", "parameter_bounds"]}
|
||||||
|
|||||||
@@ -5,3 +5,7 @@
|
|||||||
{"timestamp": "2025-12-28T10:15:00", "category": "success_pattern", "context": "Unified trial management with TrialManager and DashboardDB", "insight": "TRIAL MANAGEMENT PATTERN: Use TrialManager for consistent trial_NNNN naming across all optimization methods (Optuna, Turbo, GNN, manual). Key principles: (1) Trial numbers NEVER reset (monotonic), (2) Folders NEVER get overwritten, (3) Database always synced with filesystem, (4) Surrogate predictions are NOT trials - only FEA results. DashboardDB provides Optuna-compatible schema for dashboard integration. Path: optimization_engine/utils/trial_manager.py", "confidence": 0.95, "tags": ["trial_manager", "dashboard_db", "optuna", "trial_naming", "turbo"]}
|
{"timestamp": "2025-12-28T10:15:00", "category": "success_pattern", "context": "Unified trial management with TrialManager and DashboardDB", "insight": "TRIAL MANAGEMENT PATTERN: Use TrialManager for consistent trial_NNNN naming across all optimization methods (Optuna, Turbo, GNN, manual). Key principles: (1) Trial numbers NEVER reset (monotonic), (2) Folders NEVER get overwritten, (3) Database always synced with filesystem, (4) Surrogate predictions are NOT trials - only FEA results. DashboardDB provides Optuna-compatible schema for dashboard integration. Path: optimization_engine/utils/trial_manager.py", "confidence": 0.95, "tags": ["trial_manager", "dashboard_db", "optuna", "trial_naming", "turbo"]}
|
||||||
{"timestamp": "2025-12-28T10:15:00", "category": "success_pattern", "context": "GNN Turbo training data loading from multiple studies", "insight": "MULTI-STUDY TRAINING: When loading training data from multiple prior studies for GNN surrogate training, param names may have unit prefixes like '[mm]rib_thickness' or '[Degrees]angle'. Strip prefixes: if ']' in name: name = name.split(']', 1)[1]. Also, objective attribute names vary between studies (rel_filtered_rms_40_vs_20 vs obj_rel_filtered_rms_40_vs_20) - use fallback chain with 'or'. V5 successfully trained on 316 samples (V3: 297, V4: 19) with R²=[0.94, 0.94, 0.89, 0.95].", "confidence": 0.9, "tags": ["gnn", "turbo", "training_data", "multi_study", "param_naming"]}
|
{"timestamp": "2025-12-28T10:15:00", "category": "success_pattern", "context": "GNN Turbo training data loading from multiple studies", "insight": "MULTI-STUDY TRAINING: When loading training data from multiple prior studies for GNN surrogate training, param names may have unit prefixes like '[mm]rib_thickness' or '[Degrees]angle'. Strip prefixes: if ']' in name: name = name.split(']', 1)[1]. Also, objective attribute names vary between studies (rel_filtered_rms_40_vs_20 vs obj_rel_filtered_rms_40_vs_20) - use fallback chain with 'or'. V5 successfully trained on 316 samples (V3: 297, V4: 19) with R²=[0.94, 0.94, 0.89, 0.95].", "confidence": 0.9, "tags": ["gnn", "turbo", "training_data", "multi_study", "param_naming"]}
|
||||||
{"timestamp": "2025-12-28T12:28:04.706624", "category": "success_pattern", "context": "Implemented L-BFGS gradient optimizer for surrogate polish phase", "insight": "L-BFGS on trained MLP surrogates provides 100-1000x faster convergence than derivative-free methods (TPE, CMA-ES) for local refinement. Key: use multi-start from top FEA candidates, not random initialization. Integration: GradientOptimizer class in optimization_engine/gradient_optimizer.py.", "confidence": 0.9, "tags": ["optimization", "lbfgs", "surrogate", "gradient", "polish"]}
|
{"timestamp": "2025-12-28T12:28:04.706624", "category": "success_pattern", "context": "Implemented L-BFGS gradient optimizer for surrogate polish phase", "insight": "L-BFGS on trained MLP surrogates provides 100-1000x faster convergence than derivative-free methods (TPE, CMA-ES) for local refinement. Key: use multi-start from top FEA candidates, not random initialization. Integration: GradientOptimizer class in optimization_engine/gradient_optimizer.py.", "confidence": 0.9, "tags": ["optimization", "lbfgs", "surrogate", "gradient", "polish"]}
|
||||||
|
{"timestamp": "2025-12-29T09:30:00", "category": "success_pattern", "context": "V6 pure TPE outperformed V5 surrogate+L-BFGS by 22%", "insight": "SIMPLE BEATS COMPLEX: V6 Pure TPE achieved WS=225.41 vs V5's WS=290.18 (22.3% better). Key insight: surrogates fail when gradient methods descend to OOD regions. Fix: EnsembleSurrogate with (1) N=5 MLPs for disagreement-based uncertainty, (2) OODDetector with KNN+z-score, (3) acquisition_score balancing exploitation+exploration, (4) trust-region L-BFGS that stays in training distribution. Never trust point predictions - always require uncertainty bounds. Protocol: SYS_16_SELF_AWARE_TURBO.md. Code: optimization_engine/surrogates/ensemble_surrogate.py", "confidence": 1.0, "tags": ["ensemble", "uncertainty", "ood", "surrogate", "v6", "tpe", "self-aware"]}
|
||||||
|
{"timestamp": "2025-12-29T09:47:47.612485", "category": "success_pattern", "context": "Disk space optimization for FEA studies", "insight": "Per-trial FEA files are ~150MB but only OP2+JSON (~70MB) are essential. PRT/FEM/SIM/DAT are copies of master files and can be deleted after study completion. Archive to dalidou server for long-term storage.", "confidence": 0.95, "tags": ["disk_optimization", "archival", "study_management", "dalidou"], "related_files": ["optimization_engine/utils/study_archiver.py", "docs/protocols/operations/OP_07_DISK_OPTIMIZATION.md"]}
|
||||||
|
{"timestamp": "2026-01-02T14:30:00", "category": "success_pattern", "context": "Study Interview Mode implementation and routing update", "insight": "STUDY CREATION DEFAULT: Interview Mode is now the DEFAULT for all study creation requests. Triggers: create a study, new study, set up study, optimize this, minimize mass - any study creation intent. Benefits: (1) Material-aware validation checks stress vs yield, (2) Anti-pattern detection warns about mass-no-constraint, (3) Auto extractor mapping E1-E10, (4) State persistence for interrupted sessions, (5) Blueprint generation with full validation. Skip with: skip interview, quick setup, manual config. Implementation: optimization_engine/interview/ with StudyInterviewEngine, QuestionEngine, EngineeringValidator, StudyBlueprint. All 129 tests passing.", "confidence": 1.0, "tags": ["interview_mode", "study_creation", "default", "validation", "anti_pattern", "materials"], "related_files": [".claude/skills/modules/study-interview-mode.md", "docs/protocols/operations/OP_01_CREATE_STUDY.md", "optimization_engine/interview/study_interview.py"]}
|
||||||
|
{"timestamp": "2026-01-02T14:45:00", "category": "success_pattern", "context": "Study Interview Mode implementation complete", "insight": "INTERVIEW MODE DEFAULT: Study creation now uses Interview Mode by default for all study creation requests. This is a major usability improvement. Triggers: create a study, new study, set up, optimize this - any study creation intent. Key features: (1) Material-aware validation with 12 materials and fuzzy name matching, (2) Anti-pattern detection for 12 common mistakes, (3) Auto extractor mapping E1-E24, (4) 7-phase interview flow, (5) State persistence for interrupted sessions, (6) Blueprint validation before generation. Skip with: skip interview, quick setup, manual. Implementation in optimization_engine/interview/ with 129 tests passing. Full documentation in: .claude/skills/modules/study-interview-mode.md, docs/protocols/operations/OP_01_CREATE_STUDY.md", "confidence": 1.0, "tags": ["interview_mode", "study_creation", "default", "usability", "materials", "anti_pattern", "validation"], "related_files": [".claude/skills/modules/study-interview-mode.md", "docs/protocols/operations/OP_01_CREATE_STUDY.md", "optimization_engine/interview/"]}
|
||||||
|
|||||||
@@ -0,0 +1 @@
|
|||||||
|
{"timestamp": "2025-12-29T12:00:00", "category": "user_preference", "context": "Git remote configuration", "insight": "GitHub repository URL is https://github.com/Anto01/Atomizer.git (private repo). Always push to both origin (Gitea at 192.168.86.50:3000) and github remote.", "confidence": 1.0, "tags": ["git", "github", "remote", "configuration"]}
|
||||||
208
migrate_imports.py
Normal file
208
migrate_imports.py
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
optimization_engine Migration Script
|
||||||
|
=====================================
|
||||||
|
Automatically updates all imports across the codebase.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python migrate_imports.py --dry-run # Preview changes
|
||||||
|
python migrate_imports.py --execute # Apply changes
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Tuple
|
||||||
|
|
||||||
|
# Import mappings (old -> new) - using regex patterns
|
||||||
|
IMPORT_MAPPINGS = {
|
||||||
|
# =============================================================================
|
||||||
|
# CORE MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.runner\b': 'from optimization_engine.core.runner',
|
||||||
|
r'from optimization_engine\.base_runner\b': 'from optimization_engine.core.base_runner',
|
||||||
|
r'from optimization_engine\.runner_with_neural\b': 'from optimization_engine.core.runner_with_neural',
|
||||||
|
r'from optimization_engine\.intelligent_optimizer\b': 'from optimization_engine.core.intelligent_optimizer',
|
||||||
|
r'from optimization_engine\.method_selector\b': 'from optimization_engine.core.method_selector',
|
||||||
|
r'from optimization_engine\.strategy_selector\b': 'from optimization_engine.core.strategy_selector',
|
||||||
|
r'from optimization_engine\.strategy_portfolio\b': 'from optimization_engine.core.strategy_portfolio',
|
||||||
|
r'from optimization_engine\.gradient_optimizer\b': 'from optimization_engine.core.gradient_optimizer',
|
||||||
|
r'import optimization_engine\.runner\b': 'import optimization_engine.core.runner',
|
||||||
|
r'import optimization_engine\.intelligent_optimizer\b': 'import optimization_engine.core.intelligent_optimizer',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# SURROGATES MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.neural_surrogate\b': 'from optimization_engine.processors.surrogates.neural_surrogate',
|
||||||
|
r'from optimization_engine\.generic_surrogate\b': 'from optimization_engine.processors.surrogates.generic_surrogate',
|
||||||
|
r'from optimization_engine\.adaptive_surrogate\b': 'from optimization_engine.processors.surrogates.adaptive_surrogate',
|
||||||
|
r'from optimization_engine\.simple_mlp_surrogate\b': 'from optimization_engine.processors.surrogates.simple_mlp_surrogate',
|
||||||
|
r'from optimization_engine\.active_learning_surrogate\b': 'from optimization_engine.processors.surrogates.active_learning_surrogate',
|
||||||
|
r'from optimization_engine\.surrogate_tuner\b': 'from optimization_engine.processors.surrogates.surrogate_tuner',
|
||||||
|
r'from optimization_engine\.auto_trainer\b': 'from optimization_engine.processors.surrogates.auto_trainer',
|
||||||
|
r'from optimization_engine\.training_data_exporter\b': 'from optimization_engine.processors.surrogates.training_data_exporter',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# NX MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.nx_solver\b': 'from optimization_engine.nx.solver',
|
||||||
|
r'from optimization_engine\.nx_updater\b': 'from optimization_engine.nx.updater',
|
||||||
|
r'from optimization_engine\.nx_session_manager\b': 'from optimization_engine.nx.session_manager',
|
||||||
|
r'from optimization_engine\.solve_simulation\b': 'from optimization_engine.nx.solve_simulation',
|
||||||
|
r'from optimization_engine\.solve_simulation_simple\b': 'from optimization_engine.nx.solve_simulation_simple',
|
||||||
|
r'from optimization_engine\.model_cleanup\b': 'from optimization_engine.nx.model_cleanup',
|
||||||
|
r'from optimization_engine\.export_expressions\b': 'from optimization_engine.nx.export_expressions',
|
||||||
|
r'from optimization_engine\.import_expressions\b': 'from optimization_engine.nx.import_expressions',
|
||||||
|
r'from optimization_engine\.mesh_converter\b': 'from optimization_engine.nx.mesh_converter',
|
||||||
|
r'import optimization_engine\.nx_solver\b': 'import optimization_engine.nx.solver',
|
||||||
|
r'import optimization_engine\.nx_updater\b': 'import optimization_engine.nx.updater',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# STUDY MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.study_creator\b': 'from optimization_engine.study.creator',
|
||||||
|
r'from optimization_engine\.study_wizard\b': 'from optimization_engine.study.wizard',
|
||||||
|
r'from optimization_engine\.study_state\b': 'from optimization_engine.study.state',
|
||||||
|
r'from optimization_engine\.study_reset\b': 'from optimization_engine.study.reset',
|
||||||
|
r'from optimization_engine\.study_continuation\b': 'from optimization_engine.study.continuation',
|
||||||
|
r'from optimization_engine\.benchmarking_substudy\b': 'from optimization_engine.study.benchmarking',
|
||||||
|
r'from optimization_engine\.generate_history_from_trials\b': 'from optimization_engine.study.history_generator',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# REPORTING MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.generate_report\b': 'from optimization_engine.reporting.report_generator',
|
||||||
|
r'from optimization_engine\.generate_report_markdown\b': 'from optimization_engine.reporting.markdown_report',
|
||||||
|
r'from optimization_engine\.comprehensive_results_analyzer\b': 'from optimization_engine.reporting.results_analyzer',
|
||||||
|
r'from optimization_engine\.visualizer\b': 'from optimization_engine.reporting.visualizer',
|
||||||
|
r'from optimization_engine\.landscape_analyzer\b': 'from optimization_engine.reporting.landscape_analyzer',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# CONFIG MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.config_manager\b': 'from optimization_engine.config.manager',
|
||||||
|
r'from optimization_engine\.optimization_config_builder\b': 'from optimization_engine.config.builder',
|
||||||
|
r'from optimization_engine\.optimization_setup_wizard\b': 'from optimization_engine.config.setup_wizard',
|
||||||
|
r'from optimization_engine\.capability_matcher\b': 'from optimization_engine.config.capability_matcher',
|
||||||
|
r'from optimization_engine\.template_loader\b': 'from optimization_engine.config.template_loader',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# UTILS MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.logger\b': 'from optimization_engine.utils.logger',
|
||||||
|
r'from optimization_engine\.auto_doc\b': 'from optimization_engine.utils.auto_doc',
|
||||||
|
r'from optimization_engine\.realtime_tracking\b': 'from optimization_engine.utils.realtime_tracking',
|
||||||
|
r'from optimization_engine\.codebase_analyzer\b': 'from optimization_engine.utils.codebase_analyzer',
|
||||||
|
r'from optimization_engine\.pruning_logger\b': 'from optimization_engine.utils.pruning_logger',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# FUTURE MODULE
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.research_agent\b': 'from optimization_engine.future.research_agent',
|
||||||
|
r'from optimization_engine\.pynastran_research_agent\b': 'from optimization_engine.future.pynastran_research_agent',
|
||||||
|
r'from optimization_engine\.targeted_research_planner\b': 'from optimization_engine.future.targeted_research_planner',
|
||||||
|
r'from optimization_engine\.workflow_decomposer\b': 'from optimization_engine.future.workflow_decomposer',
|
||||||
|
r'from optimization_engine\.step_classifier\b': 'from optimization_engine.future.step_classifier',
|
||||||
|
r'from optimization_engine\.llm_optimization_runner\b': 'from optimization_engine.future.llm_optimization_runner',
|
||||||
|
r'from optimization_engine\.llm_workflow_analyzer\b': 'from optimization_engine.future.llm_workflow_analyzer',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# EXTRACTORS/VALIDATORS additions
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.op2_extractor\b': 'from optimization_engine.extractors.op2_extractor',
|
||||||
|
r'from optimization_engine\.extractor_library\b': 'from optimization_engine.extractors.extractor_library',
|
||||||
|
r'from optimization_engine\.simulation_validator\b': 'from optimization_engine.validators.simulation_validator',
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# PROCESSORS
|
||||||
|
# =============================================================================
|
||||||
|
r'from optimization_engine\.adaptive_characterization\b': 'from optimization_engine.processors.adaptive_characterization',
|
||||||
|
}
|
||||||
|
|
||||||
|
# utils submodule imports whose target moved out of utils/ entirely
# (kept separate from IMPORT_MAPPINGS for readability).
UTILS_MAPPINGS = {
    r'from optimization_engine\.utils\.nx_session_manager\b': 'from optimization_engine.nx.session_manager',
}

# Single lookup table consumed by update_file(); later entries win on key clashes.
ALL_MAPPINGS = {**IMPORT_MAPPINGS, **UTILS_MAPPINGS}
|
||||||
|
|
||||||
|
def find_files(root: Path, extensions: List[str], exclude_dirs: List[str] = None) -> List[Path]:
    """Recursively collect files under *root* whose suffix matches *extensions*.

    Args:
        root: Directory to search.
        extensions: File suffixes to match, e.g. ``['.py']``.
        exclude_dirs: Directory names to skip entirely. Defaults to the
            backup, virtualenv, node_modules, cache, and VCS directories.

    Returns:
        Matching paths, in rglob order per extension.
    """
    if exclude_dirs is None:
        exclude_dirs = ['optimization_engine_BACKUP', '.venv', 'node_modules', '__pycache__', '.git']

    # Compare exact path components, not substrings: the previous
    # `excl in str(f)` check wrongly skipped e.g. `.github/...` because of
    # the `.git` entry, and any filename merely containing `.venv`.
    excluded = set(exclude_dirs)

    files = []
    for ext in extensions:
        for f in root.rglob(f'*{ext}'):
            if not any(part in excluded for part in f.parts):
                files.append(f)
    return files
|
||||||
|
|
||||||
|
def update_file(filepath: Path, mappings: Dict[str, str], dry_run: bool = True) -> Tuple[int, List[str]]:
    """Apply the regex import rewrites in *mappings* to a single file.

    Args:
        filepath: File to rewrite.
        mappings: Regex pattern -> replacement string.
        dry_run: When True, report what would change without writing.

    Returns:
        ``(patterns_matched, change_descriptions)``; ``(0, [])`` if the
        file could not be read.
    """
    try:
        content = filepath.read_text(encoding='utf-8', errors='ignore')
    except Exception as e:
        print(f"  ERROR reading {filepath}: {e}")
        return 0, []

    changes = []
    new_content = content

    for pattern, replacement in mappings.items():
        # re.subn counts and substitutes in one pass over the *current*
        # text. The previous findall(content) + sub(new_content) pair
        # scanned the original text for counts, which could disagree with
        # what was actually substituted once earlier replacements had run.
        new_content, n = re.subn(pattern, replacement, new_content)
        if n:
            changes.append(f"  {pattern} -> {replacement} ({n} occurrences)")

    if changes and not dry_run:
        filepath.write_text(new_content, encoding='utf-8')

    return len(changes), changes
|
||||||
|
|
||||||
|
def main():
    """Entry point: scan the repo for Python files and rewrite old imports.

    Defaults to a dry run; ``--execute`` (with interactive confirmation)
    actually modifies files.
    """
    dry_run = '--dry-run' in sys.argv or '--execute' not in sys.argv
    banner = "=" * 60

    if dry_run:
        print(banner)
        print("DRY RUN MODE - No files will be modified")
        print(banner)
    else:
        print(banner)
        print("EXECUTE MODE - Files will be modified!")
        print(banner)
        # Destructive path: require an explicit "yes" before touching files.
        if input("Are you sure? (yes/no): ").lower() != 'yes':
            print("Aborted.")
            return

    py_files = find_files(Path('.'), ['.py'])
    print(f"\nFound {len(py_files)} Python files to check")

    total_changes = 0
    files_changed = 0

    for filepath in sorted(py_files):
        count, changes = update_file(filepath, ALL_MAPPINGS, dry_run)
        if count:
            files_changed += 1
            total_changes += count
            print(f"\n{filepath} ({count} changes):")
            for line in changes:
                print(line)

    print("\n" + banner)
    print(f"SUMMARY: {total_changes} changes in {files_changed} files")
    print(banner)

    if dry_run:
        print("\nTo apply changes, run: python migrate_imports.py --execute")


if __name__ == '__main__':
    main()
|
||||||
@@ -1,7 +1,165 @@
|
|||||||
"""
|
"""
|
||||||
Atomizer Optimization Engine
|
Atomizer Optimization Engine
|
||||||
|
============================
|
||||||
|
|
||||||
Core optimization logic with Optuna integration for NX Simcenter.
|
Structural optimization framework for Siemens NX.
|
||||||
|
|
||||||
|
New Module Structure (v2.0):
|
||||||
|
- core/ - Optimization runners
|
||||||
|
- processors/ - Data processing (surrogates, dynamic_response)
|
||||||
|
- nx/ - NX/Nastran integration
|
||||||
|
- study/ - Study management
|
||||||
|
- reporting/ - Reports and analysis
|
||||||
|
- config/ - Configuration
|
||||||
|
- extractors/ - Physics extraction (unchanged)
|
||||||
|
- insights/ - Visualizations (unchanged)
|
||||||
|
- gnn/ - Graph neural networks (unchanged)
|
||||||
|
- hooks/ - NX hooks (unchanged)
|
||||||
|
- utils/ - Utilities
|
||||||
|
- validators/ - Validation (unchanged)
|
||||||
|
|
||||||
|
Quick Start:
|
||||||
|
from optimization_engine.core import OptimizationRunner
|
||||||
|
from optimization_engine.nx import NXSolver
|
||||||
|
from optimization_engine.extractors import extract_displacement
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__version__ = "0.1.0"
|
__version__ = '2.0.0'
|
||||||
|
|
||||||
|
import warnings as _warnings
|
||||||
|
import importlib as _importlib
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# SUBMODULE LIST
|
||||||
|
# =============================================================================
|
||||||
|
_SUBMODULES = {
|
||||||
|
'core', 'processors', 'nx', 'study', 'reporting', 'config',
|
||||||
|
'extractors', 'insights', 'gnn', 'hooks', 'utils', 'validators',
|
||||||
|
}
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# BACKWARDS COMPATIBILITY LAYER
|
||||||
|
# =============================================================================
|
||||||
|
# These aliases allow old imports to work with deprecation warnings.
|
||||||
|
# Will be removed in v3.0.
|
||||||
|
|
||||||
|
_DEPRECATED_MAPPINGS = {
|
||||||
|
# Core
|
||||||
|
'runner': 'optimization_engine.core.runner',
|
||||||
|
'base_runner': 'optimization_engine.core.base_runner',
|
||||||
|
'intelligent_optimizer': 'optimization_engine.core.intelligent_optimizer',
|
||||||
|
'method_selector': 'optimization_engine.core.method_selector',
|
||||||
|
'strategy_selector': 'optimization_engine.core.strategy_selector',
|
||||||
|
'strategy_portfolio': 'optimization_engine.core.strategy_portfolio',
|
||||||
|
'gradient_optimizer': 'optimization_engine.core.gradient_optimizer',
|
||||||
|
'runner_with_neural': 'optimization_engine.core.runner_with_neural',
|
||||||
|
|
||||||
|
# Surrogates
|
||||||
|
'neural_surrogate': 'optimization_engine.processors.surrogates.neural_surrogate',
|
||||||
|
'generic_surrogate': 'optimization_engine.processors.surrogates.generic_surrogate',
|
||||||
|
'adaptive_surrogate': 'optimization_engine.processors.surrogates.adaptive_surrogate',
|
||||||
|
'simple_mlp_surrogate': 'optimization_engine.processors.surrogates.simple_mlp_surrogate',
|
||||||
|
'active_learning_surrogate': 'optimization_engine.processors.surrogates.active_learning_surrogate',
|
||||||
|
'surrogate_tuner': 'optimization_engine.processors.surrogates.surrogate_tuner',
|
||||||
|
'auto_trainer': 'optimization_engine.processors.surrogates.auto_trainer',
|
||||||
|
'training_data_exporter': 'optimization_engine.processors.surrogates.training_data_exporter',
|
||||||
|
|
||||||
|
# NX
|
||||||
|
'nx_solver': 'optimization_engine.nx.solver',
|
||||||
|
'nx_updater': 'optimization_engine.nx.updater',
|
||||||
|
'nx_session_manager': 'optimization_engine.nx.session_manager',
|
||||||
|
'solve_simulation': 'optimization_engine.nx.solve_simulation',
|
||||||
|
'solve_simulation_simple': 'optimization_engine.nx.solve_simulation_simple',
|
||||||
|
'model_cleanup': 'optimization_engine.nx.model_cleanup',
|
||||||
|
'export_expressions': 'optimization_engine.nx.export_expressions',
|
||||||
|
'import_expressions': 'optimization_engine.nx.import_expressions',
|
||||||
|
'mesh_converter': 'optimization_engine.nx.mesh_converter',
|
||||||
|
|
||||||
|
# Study
|
||||||
|
'study_creator': 'optimization_engine.study.creator',
|
||||||
|
'study_wizard': 'optimization_engine.study.wizard',
|
||||||
|
'study_state': 'optimization_engine.study.state',
|
||||||
|
'study_reset': 'optimization_engine.study.reset',
|
||||||
|
'study_continuation': 'optimization_engine.study.continuation',
|
||||||
|
'benchmarking_substudy': 'optimization_engine.study.benchmarking',
|
||||||
|
'generate_history_from_trials': 'optimization_engine.study.history_generator',
|
||||||
|
|
||||||
|
# Reporting
|
||||||
|
'generate_report': 'optimization_engine.reporting.report_generator',
|
||||||
|
'generate_report_markdown': 'optimization_engine.reporting.markdown_report',
|
||||||
|
'comprehensive_results_analyzer': 'optimization_engine.reporting.results_analyzer',
|
||||||
|
'visualizer': 'optimization_engine.reporting.visualizer',
|
||||||
|
'landscape_analyzer': 'optimization_engine.reporting.landscape_analyzer',
|
||||||
|
|
||||||
|
# Config
|
||||||
|
'config_manager': 'optimization_engine.config.manager',
|
||||||
|
'optimization_config_builder': 'optimization_engine.config.builder',
|
||||||
|
'optimization_setup_wizard': 'optimization_engine.config.setup_wizard',
|
||||||
|
'capability_matcher': 'optimization_engine.config.capability_matcher',
|
||||||
|
'template_loader': 'optimization_engine.config.template_loader',
|
||||||
|
|
||||||
|
# Utils
|
||||||
|
'logger': 'optimization_engine.utils.logger',
|
||||||
|
'auto_doc': 'optimization_engine.utils.auto_doc',
|
||||||
|
'realtime_tracking': 'optimization_engine.utils.realtime_tracking',
|
||||||
|
'codebase_analyzer': 'optimization_engine.utils.codebase_analyzer',
|
||||||
|
'pruning_logger': 'optimization_engine.utils.pruning_logger',
|
||||||
|
|
||||||
|
# Future
|
||||||
|
'research_agent': 'optimization_engine.future.research_agent',
|
||||||
|
'pynastran_research_agent': 'optimization_engine.future.pynastran_research_agent',
|
||||||
|
'targeted_research_planner': 'optimization_engine.future.targeted_research_planner',
|
||||||
|
'workflow_decomposer': 'optimization_engine.future.workflow_decomposer',
|
||||||
|
'step_classifier': 'optimization_engine.future.step_classifier',
|
||||||
|
|
||||||
|
# Extractors/Validators
|
||||||
|
'op2_extractor': 'optimization_engine.extractors.op2_extractor',
|
||||||
|
'extractor_library': 'optimization_engine.extractors.extractor_library',
|
||||||
|
'simulation_validator': 'optimization_engine.validators.simulation_validator',
|
||||||
|
|
||||||
|
# Processors
|
||||||
|
'adaptive_characterization': 'optimization_engine.processors.adaptive_characterization',
|
||||||
|
}
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# LAZY LOADING
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def __getattr__(name):
    """Resolve submodules lazily and route deprecated flat names (PEP 562).

    Submodules import on first access; pre-v2.0 flat module names are
    redirected to their new location with a DeprecationWarning.
    """
    # Plain submodule access, e.g. `from optimization_engine import core`.
    if name in _SUBMODULES:
        return _importlib.import_module(f'optimization_engine.{name}')

    # Pre-reorganization flat names: warn, then import the new location.
    target = _DEPRECATED_MAPPINGS.get(name)
    if target is not None:
        _warnings.warn(
            f"Importing '{name}' from optimization_engine is deprecated. "
            f"Use '{target}' instead. "
            f"This will be removed in v3.0.",
            DeprecationWarning,
            stacklevel=2
        )
        return _importlib.import_module(target)

    raise AttributeError(f"module 'optimization_engine' has no attribute '{name}'")
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
# Version
|
||||||
|
'__version__',
|
||||||
|
# Submodules
|
||||||
|
'core',
|
||||||
|
'processors',
|
||||||
|
'nx',
|
||||||
|
'study',
|
||||||
|
'reporting',
|
||||||
|
'config',
|
||||||
|
'extractors',
|
||||||
|
'insights',
|
||||||
|
'gnn',
|
||||||
|
'hooks',
|
||||||
|
'utils',
|
||||||
|
'validators',
|
||||||
|
]
|
||||||
|
|||||||
43
optimization_engine/config/__init__.py
Normal file
43
optimization_engine/config/__init__.py
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
"""
|
||||||
|
Configuration Management
|
||||||
|
========================
|
||||||
|
|
||||||
|
Configuration loading, validation, and building.
|
||||||
|
|
||||||
|
Modules:
|
||||||
|
- manager: ConfigManager for loading/saving configs
|
||||||
|
- builder: OptimizationConfigBuilder for creating configs
|
||||||
|
- setup_wizard: Interactive configuration setup
|
||||||
|
- capability_matcher: Match capabilities to requirements
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Lazy imports to avoid circular dependencies
|
||||||
|
def __getattr__(name):
    """Resolve public names lazily on first access (avoids circular imports)."""
    if name in ('ConfigManager', 'ConfigValidationError'):
        from . import manager
        return getattr(manager, name)
    if name == 'OptimizationConfigBuilder':
        from . import builder
        return builder.OptimizationConfigBuilder
    if name == 'SetupWizard':
        from . import setup_wizard
        return setup_wizard.SetupWizard
    if name == 'CapabilityMatcher':
        from . import capability_matcher
        return capability_matcher.CapabilityMatcher
    if name == 'TemplateLoader':
        from . import template_loader
        return template_loader.TemplateLoader
    raise AttributeError(f"module 'optimization_engine.config' has no attribute '{name}'")
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'ConfigManager',
|
||||||
|
'ConfigValidationError',
|
||||||
|
'OptimizationConfigBuilder',
|
||||||
|
'SetupWizard',
|
||||||
|
'CapabilityMatcher',
|
||||||
|
'TemplateLoader',
|
||||||
|
]
|
||||||
@@ -12,8 +12,8 @@ Last Updated: 2025-01-16
|
|||||||
from typing import Dict, List, Any, Optional
|
from typing import Dict, List, Any, Optional
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
|
||||||
from optimization_engine.workflow_decomposer import WorkflowStep
|
from optimization_engine.future.workflow_decomposer import WorkflowStep
|
||||||
from optimization_engine.codebase_analyzer import CodebaseCapabilityAnalyzer
|
from optimization_engine.utils.codebase_analyzer import CodebaseCapabilityAnalyzer
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@@ -282,7 +282,7 @@ class CapabilityMatcher:
|
|||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Test the capability matcher."""
|
"""Test the capability matcher."""
|
||||||
from optimization_engine.workflow_decomposer import WorkflowDecomposer
|
from optimization_engine.future.workflow_decomposer import WorkflowDecomposer
|
||||||
|
|
||||||
print("Capability Matcher Test")
|
print("Capability Matcher Test")
|
||||||
print("=" * 80)
|
print("=" * 80)
|
||||||
@@ -5,7 +5,7 @@ ensuring consistency across all studies.
|
|||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
# In run_optimization.py
|
# In run_optimization.py
|
||||||
from optimization_engine.config_manager import ConfigManager
|
from optimization_engine.config.manager import ConfigManager
|
||||||
|
|
||||||
config_manager = ConfigManager(Path(__file__).parent / "1_setup" / "optimization_config.json")
|
config_manager = ConfigManager(Path(__file__).parent / "1_setup" / "optimization_config.json")
|
||||||
config_manager.load_config()
|
config_manager.load_config()
|
||||||
@@ -21,8 +21,8 @@ from typing import Dict, Any, List, Optional, Tuple
|
|||||||
import logging
|
import logging
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
|
||||||
from optimization_engine.nx_updater import NXParameterUpdater
|
from optimization_engine.nx.updater import NXParameterUpdater
|
||||||
from optimization_engine.nx_solver import NXSolver
|
from optimization_engine.nx.solver import NXSolver
|
||||||
from optimization_engine.extractor_orchestrator import ExtractorOrchestrator
|
from optimization_engine.extractor_orchestrator import ExtractorOrchestrator
|
||||||
from optimization_engine.inline_code_generator import InlineCodeGenerator
|
from optimization_engine.inline_code_generator import InlineCodeGenerator
|
||||||
from optimization_engine.plugins.hook_manager import HookManager
|
from optimization_engine.plugins.hook_manager import HookManager
|
||||||
@@ -4,7 +4,7 @@ Template Loader for Atomizer Optimization Studies
|
|||||||
Creates new studies from templates with automatic folder structure creation.
|
Creates new studies from templates with automatic folder structure creation.
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
from optimization_engine.template_loader import create_study_from_template, list_templates
|
from optimization_engine.config.template_loader import create_study_from_template, list_templates
|
||||||
|
|
||||||
# List available templates
|
# List available templates
|
||||||
templates = list_templates()
|
templates = list_templates()
|
||||||
123
optimization_engine/context/__init__.py
Normal file
123
optimization_engine/context/__init__.py
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Context Engineering Module
|
||||||
|
|
||||||
|
Implements state-of-the-art context engineering for LLM-powered optimization.
|
||||||
|
Based on the ACE (Agentic Context Engineering) framework.
|
||||||
|
|
||||||
|
Components:
|
||||||
|
- Playbook: Structured knowledge store with helpful/harmful tracking
|
||||||
|
- Reflector: Analyzes optimization outcomes to extract insights
|
||||||
|
- SessionState: Context isolation with exposed/isolated separation
|
||||||
|
- CacheMonitor: KV-cache optimization for cost reduction
|
||||||
|
- FeedbackLoop: Automated learning from execution
|
||||||
|
- Compaction: Long-running session context management
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
from optimization_engine.context import (
|
||||||
|
AtomizerPlaybook,
|
||||||
|
AtomizerReflector,
|
||||||
|
AtomizerSessionState,
|
||||||
|
FeedbackLoop,
|
||||||
|
CompactionManager
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load or create playbook
|
||||||
|
playbook = AtomizerPlaybook.load(path)
|
||||||
|
|
||||||
|
# Create feedback loop for learning
|
||||||
|
feedback = FeedbackLoop(playbook_path)
|
||||||
|
|
||||||
|
# Process trial results
|
||||||
|
feedback.process_trial_result(...)
|
||||||
|
|
||||||
|
# Finalize and commit learning
|
||||||
|
feedback.finalize_study(stats)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .playbook import (
|
||||||
|
AtomizerPlaybook,
|
||||||
|
PlaybookItem,
|
||||||
|
InsightCategory,
|
||||||
|
get_playbook,
|
||||||
|
save_playbook,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .reflector import (
|
||||||
|
AtomizerReflector,
|
||||||
|
OptimizationOutcome,
|
||||||
|
InsightCandidate,
|
||||||
|
ReflectorFactory,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .session_state import (
|
||||||
|
AtomizerSessionState,
|
||||||
|
ExposedState,
|
||||||
|
IsolatedState,
|
||||||
|
TaskType,
|
||||||
|
get_session,
|
||||||
|
set_session,
|
||||||
|
clear_session,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .cache_monitor import (
|
||||||
|
ContextCacheOptimizer,
|
||||||
|
CacheStats,
|
||||||
|
ContextSection,
|
||||||
|
StablePrefixBuilder,
|
||||||
|
get_cache_optimizer,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .feedback_loop import (
|
||||||
|
FeedbackLoop,
|
||||||
|
FeedbackLoopFactory,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .compaction import (
|
||||||
|
CompactionManager,
|
||||||
|
ContextEvent,
|
||||||
|
EventType,
|
||||||
|
ContextBudgetManager,
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
# Playbook
|
||||||
|
"AtomizerPlaybook",
|
||||||
|
"PlaybookItem",
|
||||||
|
"InsightCategory",
|
||||||
|
"get_playbook",
|
||||||
|
"save_playbook",
|
||||||
|
|
||||||
|
# Reflector
|
||||||
|
"AtomizerReflector",
|
||||||
|
"OptimizationOutcome",
|
||||||
|
"InsightCandidate",
|
||||||
|
"ReflectorFactory",
|
||||||
|
|
||||||
|
# Session State
|
||||||
|
"AtomizerSessionState",
|
||||||
|
"ExposedState",
|
||||||
|
"IsolatedState",
|
||||||
|
"TaskType",
|
||||||
|
"get_session",
|
||||||
|
"set_session",
|
||||||
|
"clear_session",
|
||||||
|
|
||||||
|
# Cache Monitor
|
||||||
|
"ContextCacheOptimizer",
|
||||||
|
"CacheStats",
|
||||||
|
"ContextSection",
|
||||||
|
"StablePrefixBuilder",
|
||||||
|
"get_cache_optimizer",
|
||||||
|
|
||||||
|
# Feedback Loop
|
||||||
|
"FeedbackLoop",
|
||||||
|
"FeedbackLoopFactory",
|
||||||
|
|
||||||
|
# Compaction
|
||||||
|
"CompactionManager",
|
||||||
|
"ContextEvent",
|
||||||
|
"EventType",
|
||||||
|
"ContextBudgetManager",
|
||||||
|
]
|
||||||
|
|
||||||
|
__version__ = "1.0.0"
|
||||||
390
optimization_engine/context/cache_monitor.py
Normal file
390
optimization_engine/context/cache_monitor.py
Normal file
@@ -0,0 +1,390 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Cache Monitor - KV-Cache Optimization
|
||||||
|
|
||||||
|
Part of the ACE (Agentic Context Engineering) implementation for Atomizer.
|
||||||
|
|
||||||
|
Monitors and optimizes KV-cache hit rates for cost reduction.
|
||||||
|
Based on the principle that cached tokens cost ~10x less than uncached.
|
||||||
|
|
||||||
|
The cache monitor tracks:
|
||||||
|
- Stable prefix length (should stay constant for cache hits)
|
||||||
|
- Cache hit rate across requests
|
||||||
|
- Estimated cost savings
|
||||||
|
|
||||||
|
Structure for KV-cache optimization:
|
||||||
|
1. STABLE PREFIX - Never changes (identity, tools, routing)
|
||||||
|
2. SEMI-STABLE - Changes per session type (protocols, playbook)
|
||||||
|
3. DYNAMIC - Changes every turn (state, user message)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import Optional, List, Dict, Any
|
||||||
|
from datetime import datetime
|
||||||
|
import hashlib
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class CacheStats:
    """Running counters that describe KV-cache efficiency."""
    total_requests: int = 0
    cache_hits: int = 0
    cache_misses: int = 0
    prefix_length_chars: int = 0
    prefix_length_tokens: int = 0  # Estimated

    @property
    def hit_rate(self) -> float:
        """Cache hit rate as a fraction in [0.0, 1.0] (0.0 when no requests)."""
        if not self.total_requests:
            return 0.0
        return self.cache_hits / self.total_requests

    @property
    def estimated_savings_percent(self) -> float:
        """
        Estimated cost savings from cache hits.

        Cached tokens are assumed to cost ~10% of uncached ones, so the
        saving works out to hit_rate * 90%.
        """
        if not self.total_requests:
            return 0.0
        return self.hit_rate * 90.0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the counters together with the derived metrics."""
        return {
            "total_requests": self.total_requests,
            "cache_hits": self.cache_hits,
            "cache_misses": self.cache_misses,
            "hit_rate": self.hit_rate,
            "prefix_length_chars": self.prefix_length_chars,
            "prefix_length_tokens": self.prefix_length_tokens,
            "estimated_savings_percent": self.estimated_savings_percent,
        }
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ContextSection:
    """A named chunk of context tagged with how often it is expected to change."""
    name: str
    content: str
    stability: str  # "stable", "semi_stable", "dynamic"
    last_hash: str = ""

    def compute_hash(self) -> str:
        """Return the MD5 hex digest of the current content."""
        return hashlib.md5(self.content.encode()).hexdigest()

    def has_changed(self) -> bool:
        """
        Report whether the content differs from the last recorded hash.

        Updates the stored hash as a side effect, so a second call with
        unchanged content returns False.
        """
        fresh = self.compute_hash()
        was_different = fresh != self.last_hash
        self.last_hash = fresh
        return was_different
|
||||||
|
|
||||||
|
|
||||||
|
class ContextCacheOptimizer:
    """
    Tracks and optimizes context for cache efficiency.

    Implements the three-tier context structure:
    1. Stable prefix (cached across all requests)
    2. Semi-stable section (cached per session type)
    3. Dynamic section (changes every turn)

    Usage:
        optimizer = ContextCacheOptimizer()

        # Build context with cache optimization
        context = optimizer.prepare_context(
            stable_prefix=identity_and_tools,
            semi_stable=protocols_and_playbook,
            dynamic=state_and_message
        )

        # Check efficiency
        print(optimizer.get_report())
    """

    # Approximate characters per token for estimation (rough heuristic)
    CHARS_PER_TOKEN = 4

    def __init__(self):
        self.stats = CacheStats()  # running hit/miss counters
        self._sections: Dict[str, ContextSection] = {}  # registered sections for change tracking
        self._last_stable_hash: Optional[str] = None
        self._last_semi_stable_hash: Optional[str] = None
        self._request_history: List[Dict[str, Any]] = []  # bounded log of recent requests

    def prepare_context(
        self,
        stable_prefix: str,
        semi_stable: str,
        dynamic: str
    ) -> str:
        """
        Assemble context optimized for caching.

        Tracks whether the stable prefix changed (a cache miss).

        Args:
            stable_prefix: Content that never changes (tools, identity)
            semi_stable: Content that changes per session type
            dynamic: Content that changes every turn

        Returns:
            Assembled context string with clear section boundaries
        """
        # Hash the stable prefix
        stable_hash = hashlib.md5(stable_prefix.encode()).hexdigest()

        self.stats.total_requests += 1

        # Decide hit/miss BEFORE updating the stored hash.
        # Fix: the previous implementation overwrote _last_stable_hash and
        # then re-compared it inside the history record below, so every
        # history entry was (incorrectly) logged as a cache hit.
        cache_hit = stable_hash == self._last_stable_hash
        if cache_hit:
            self.stats.cache_hits += 1
        else:
            self.stats.cache_misses += 1

        self._last_stable_hash = stable_hash
        self.stats.prefix_length_chars = len(stable_prefix)
        self.stats.prefix_length_tokens = len(stable_prefix) // self.CHARS_PER_TOKEN

        # Record request for history
        self._request_history.append({
            "timestamp": datetime.now().isoformat(),
            "cache_hit": cache_hit,
            "stable_length": len(stable_prefix),
            "semi_stable_length": len(semi_stable),
            "dynamic_length": len(dynamic)
        })

        # Keep history bounded
        if len(self._request_history) > 100:
            self._request_history = self._request_history[-100:]

        # Assemble with clear boundaries
        # Using markdown horizontal rules as section separators
        return f"""{stable_prefix}

---

{semi_stable}

---

{dynamic}"""

    def register_section(
        self,
        name: str,
        content: str,
        stability: str = "dynamic"
    ) -> None:
        """
        Register a context section for change tracking.

        Args:
            name: Section identifier
            content: Section content
            stability: One of "stable", "semi_stable", "dynamic"
        """
        section = ContextSection(
            name=name,
            content=content,
            stability=stability
        )
        # Seed last_hash so the first check_section_changes() call
        # reports "unchanged" rather than a spurious change.
        section.last_hash = section.compute_hash()
        self._sections[name] = section

    def check_section_changes(self) -> Dict[str, bool]:
        """
        Check which registered sections have changed.

        Returns:
            Dictionary mapping section names to change status
        """
        changes = {}
        for name, section in self._sections.items():
            changes[name] = section.has_changed()
        return changes

    def get_stable_sections(self) -> List[str]:
        """Get names of sections marked as stable."""
        return [
            name for name, section in self._sections.items()
            if section.stability == "stable"
        ]

    def get_report(self) -> str:
        """Generate human-readable cache efficiency report."""
        return f"""
Cache Efficiency Report
=======================
Requests: {self.stats.total_requests}
Cache Hits: {self.stats.cache_hits}
Cache Misses: {self.stats.cache_misses}
Hit Rate: {self.stats.hit_rate:.1%}

Stable Prefix:
- Characters: {self.stats.prefix_length_chars:,}
- Estimated Tokens: {self.stats.prefix_length_tokens:,}

Cost Impact:
- Estimated Savings: {self.stats.estimated_savings_percent:.0f}%
- (Based on 10x cost difference for cached tokens)

Recommendations:
{self._get_recommendations()}
"""

    def _get_recommendations(self) -> str:
        """Generate optimization recommendations from the current stats."""
        recommendations = []

        # Only meaningful after a handful of requests
        if self.stats.hit_rate < 0.5 and self.stats.total_requests > 5:
            recommendations.append(
                "- Low cache hit rate: Check if stable prefix is actually stable"
            )

        if self.stats.prefix_length_tokens > 5000:
            recommendations.append(
                "- Large stable prefix: Consider moving less-stable content to semi-stable"
            )

        if self.stats.prefix_length_tokens < 1000:
            recommendations.append(
                "- Small stable prefix: Consider moving more content to stable section"
            )

        if not recommendations:
            recommendations.append("- Cache performance looks good!")

        return "\n".join(recommendations)

    def get_stats_dict(self) -> Dict[str, Any]:
        """Get statistics as dictionary."""
        return self.stats.to_dict()

    def reset_stats(self) -> None:
        """Reset all statistics (registered sections are kept)."""
        self.stats = CacheStats()
        self._request_history = []

    def save_stats(self, path: Path) -> None:
        """Save statistics, recent history, and section metadata to a JSON file."""
        data = {
            "stats": self.stats.to_dict(),
            "request_history": self._request_history[-50:],  # Last 50
            "sections": {
                name: {
                    "stability": s.stability,
                    "content_length": len(s.content)
                }
                for name, s in self._sections.items()
            }
        }
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)

    @classmethod
    def load_stats(cls, path: Path) -> "ContextCacheOptimizer":
        """
        Load statistics from a JSON file previously written by save_stats.

        Returns a fresh optimizer if the file does not exist.
        """
        optimizer = cls()

        if not path.exists():
            return optimizer

        with open(path, encoding='utf-8') as f:
            data = json.load(f)

        stats = data.get("stats", {})
        optimizer.stats.total_requests = stats.get("total_requests", 0)
        optimizer.stats.cache_hits = stats.get("cache_hits", 0)
        optimizer.stats.cache_misses = stats.get("cache_misses", 0)
        optimizer.stats.prefix_length_chars = stats.get("prefix_length_chars", 0)
        optimizer.stats.prefix_length_tokens = stats.get("prefix_length_tokens", 0)

        optimizer._request_history = data.get("request_history", [])

        return optimizer
|
||||||
|
|
||||||
|
|
||||||
|
class StablePrefixBuilder:
    """
    Helper for assembling the stable-prefix portion of the context.

    Sections are emitted in a fixed, explicit order so the resulting
    string is byte-identical across requests, maximizing cache hits.
    """

    def __init__(self):
        # Each entry is (order, name, content); build() sorts by order.
        self._sections: List[tuple] = []

    def add_section(self, name: str, content: str, order: int = 50) -> "StablePrefixBuilder":
        """
        Queue a section for inclusion in the stable prefix.

        Args:
            name: Section name (for documentation)
            content: Section content
            order: Sort order (lower = earlier)

        Returns:
            Self for chaining
        """
        self._sections.append((order, name, content))
        return self

    def add_identity(self, identity: str) -> "StablePrefixBuilder":
        """Add identity section (order 10)."""
        return self.add_section("identity", identity, order=10)

    def add_capabilities(self, capabilities: str) -> "StablePrefixBuilder":
        """Add capabilities section (order 20)."""
        return self.add_section("capabilities", capabilities, order=20)

    def add_tools(self, tools: str) -> "StablePrefixBuilder":
        """Add tools section (order 30)."""
        return self.add_section("tools", tools, order=30)

    def add_routing(self, routing: str) -> "StablePrefixBuilder":
        """Add routing section (order 40)."""
        return self.add_section("routing", routing, order=40)

    def build(self) -> str:
        """
        Assemble the stable prefix string.

        Sections are sorted by their order value (stable sort, so ties
        keep insertion order) to guarantee consistent output.

        Returns:
            Assembled stable prefix
        """
        parts: List[str] = []
        for _, title, body in sorted(self._sections, key=lambda entry: entry[0]):
            parts.append(f"<!-- {title} -->")
            parts.append(body.strip())
            parts.append("")
        return "\n".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
# Global cache optimizer instance (created lazily on first access)
_global_optimizer: Optional[ContextCacheOptimizer] = None


def get_cache_optimizer() -> ContextCacheOptimizer:
    """Return the process-wide cache optimizer, creating it on first use."""
    global _global_optimizer
    optimizer = _global_optimizer
    if optimizer is None:
        optimizer = ContextCacheOptimizer()
        _global_optimizer = optimizer
    return optimizer
|
||||||
520
optimization_engine/context/compaction.py
Normal file
520
optimization_engine/context/compaction.py
Normal file
@@ -0,0 +1,520 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Context Compaction - Long-Running Session Management
|
||||||
|
|
||||||
|
Part of the ACE (Agentic Context Engineering) implementation for Atomizer.
|
||||||
|
|
||||||
|
Based on Google ADK's compaction architecture:
|
||||||
|
- Trigger compaction when threshold reached
|
||||||
|
- Summarize older events
|
||||||
|
- Preserve recent detail
|
||||||
|
- Never compact error events
|
||||||
|
|
||||||
|
This module handles context management for long-running optimizations
|
||||||
|
that may exceed context window limits.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import List, Dict, Any, Optional
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import datetime
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
|
||||||
|
class EventType(Enum):
    """Types of events in optimization context.

    The string values are the serialized form used by
    ContextEvent.to_dict()/from_dict().
    """
    TRIAL_START = "trial_start"        # a trial began executing
    TRIAL_COMPLETE = "trial_complete"  # trial finished successfully
    TRIAL_FAILED = "trial_failed"      # trial finished with a failure
    ERROR = "error"                    # preserved by CompactionManager (never compacted)
    WARNING = "warning"                # non-fatal issue worth surfacing
    MILESTONE = "milestone"            # notable progress marker (preserved)
    COMPACTION = "compaction"          # synthetic summary of a compacted region
    STUDY_START = "study_start"        # optimization study began
    STUDY_END = "study_end"            # optimization study finished
    CONFIG_CHANGE = "config_change"    # configuration was modified mid-run
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ContextEvent:
    """
    Single event in optimization context.

    Events are the atomic units of context history; each one can be
    summarized away (compacted) or kept verbatim based on importance.
    """
    timestamp: datetime
    event_type: EventType
    summary: str
    details: Dict[str, Any] = field(default_factory=dict)
    compacted: bool = False
    preserve: bool = False  # If True, never compact this event

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the event to a plain dictionary."""
        return dict(
            timestamp=self.timestamp.isoformat(),
            event_type=self.event_type.value,
            summary=self.summary,
            details=self.details,
            compacted=self.compacted,
            preserve=self.preserve,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ContextEvent":
        """Rebuild an event from its to_dict() representation."""
        return cls(
            datetime.fromisoformat(data["timestamp"]),
            EventType(data["event_type"]),
            data["summary"],
            data.get("details", {}),
            data.get("compacted", False),
            data.get("preserve", False),
        )
|
||||||
|
|
||||||
|
|
||||||
|
class CompactionManager:
    """
    Manages context compaction for long optimization sessions.

    Strategy:
    - Keep last N events in full detail
    - Summarize older events into milestone markers
    - Preserve error events (never compact errors)
    - Track statistics for optimization insights

    Usage:
        manager = CompactionManager(compaction_threshold=50, keep_recent=20)

        # Add events as they occur
        manager.add_event(ContextEvent(
            timestamp=datetime.now(),
            event_type=EventType.TRIAL_COMPLETE,
            summary="Trial 42 complete: obj=100.5",
            details={"trial_number": 42, "objective": 100.5}
        ))

        # Get context string for LLM
        context = manager.get_context_string()

        # Check if compaction occurred
        print(f"Compactions: {manager.compaction_count}")
    """

    def __init__(
        self,
        compaction_threshold: int = 50,
        keep_recent: int = 20,
        keep_errors: bool = True
    ):
        """
        Initialize compaction manager.

        Args:
            compaction_threshold: Trigger compaction when events exceed this
            keep_recent: Number of recent events to always keep in detail
            keep_errors: Whether to preserve all error events
        """
        self.events: List[ContextEvent] = []
        self.compaction_threshold = compaction_threshold
        self.keep_recent = keep_recent
        self.keep_errors = keep_errors
        self.compaction_count = 0

        # Statistics for compacted regions
        self._compaction_stats: List[Dict[str, Any]] = []

    def add_event(self, event: ContextEvent) -> bool:
        """
        Add event and trigger compaction if needed.

        Args:
            event: The event to add

        Returns:
            True if compaction was triggered
        """
        # Mark errors as preserved so _compact() never summarizes them away
        if event.event_type == EventType.ERROR and self.keep_errors:
            event.preserve = True

        self.events.append(event)

        # Check if compaction needed
        if len(self.events) > self.compaction_threshold:
            self._compact()
            return True

        return False

    def add_trial_event(
        self,
        trial_number: int,
        success: bool,
        objective: Optional[float] = None,
        duration: Optional[float] = None
    ) -> None:
        """
        Convenience method to add a trial completion event.

        Args:
            trial_number: Trial number
            success: Whether trial succeeded
            objective: Objective value (if successful)
            duration: Trial duration in seconds
        """
        event_type = EventType.TRIAL_COMPLETE if success else EventType.TRIAL_FAILED

        # Build a compact one-line summary, e.g. "Trial 42 | obj=100.5 | 3.1s"
        summary_parts = [f"Trial {trial_number}"]
        if success and objective is not None:
            summary_parts.append(f"obj={objective:.4g}")
        elif not success:
            summary_parts.append("FAILED")
        if duration is not None:
            summary_parts.append(f"{duration:.1f}s")

        self.add_event(ContextEvent(
            timestamp=datetime.now(),
            event_type=event_type,
            summary=" | ".join(summary_parts),
            details={
                "trial_number": trial_number,
                "success": success,
                "objective": objective,
                "duration": duration
            }
        ))

    def add_error_event(self, error_message: str, error_type: str = "") -> None:
        """
        Add an error event (always preserved).

        Args:
            error_message: Error description
            error_type: Optional error classification
        """
        summary = f"[{error_type}] {error_message}" if error_type else error_message

        self.add_event(ContextEvent(
            timestamp=datetime.now(),
            event_type=EventType.ERROR,
            summary=summary,
            details={"error_type": error_type, "message": error_message},
            preserve=True
        ))

    def add_milestone(self, description: str, details: Optional[Dict[str, Any]] = None) -> None:
        """
        Add a milestone event (preserved).

        Args:
            description: Milestone description
            details: Optional additional details
        """
        self.add_event(ContextEvent(
            timestamp=datetime.now(),
            event_type=EventType.MILESTONE,
            summary=description,
            details=details or {},
            preserve=True
        ))

    def _compact(self) -> None:
        """
        Compact older events into summaries.

        Preserves:
        - All error events (if keep_errors=True)
        - Events marked with preserve=True
        - Last `keep_recent` events
        - Milestone summaries of compacted regions
        """
        if len(self.events) <= self.keep_recent:
            return

        # Split into old and recent
        old_events = self.events[:-self.keep_recent]
        recent_events = self.events[-self.keep_recent:]

        # Separate preserved from compactable
        preserved_events = [e for e in old_events if e.preserve]
        compactable_events = [e for e in old_events if not e.preserve]

        # Summarize compactable events into a single synthetic event
        if compactable_events:
            summary = self._create_summary(compactable_events)

            # The synthetic event carries the start timestamp of the region
            # so chronology is roughly preserved in the rebuilt list.
            compaction_event = ContextEvent(
                timestamp=compactable_events[0].timestamp,
                event_type=EventType.COMPACTION,
                summary=summary,
                details={
                    "events_compacted": len(compactable_events),
                    "compaction_number": self.compaction_count,
                    "time_range": {
                        "start": compactable_events[0].timestamp.isoformat(),
                        "end": compactable_events[-1].timestamp.isoformat()
                    }
                },
                compacted=True
            )

            self.compaction_count += 1

            # Store compaction statistics
            self._compaction_stats.append({
                "compaction_number": self.compaction_count,
                "events_compacted": len(compactable_events),
                "summary": summary
            })

            # Rebuild events list: summary first, then preserved, then recent.
            # NOTE(review): preserved events lose their original interleaving
            # with the compacted region — appears intentional.
            self.events = [compaction_event] + preserved_events + recent_events
        else:
            self.events = preserved_events + recent_events

    def _create_summary(self, events: List[ContextEvent]) -> str:
        """
        Create summary of compacted events.

        Args:
            events: List of events to summarize

        Returns:
            Summary string
        """
        # Collect trial statistics
        trial_events = [
            e for e in events
            if e.event_type in (EventType.TRIAL_COMPLETE, EventType.TRIAL_FAILED)
        ]

        if not trial_events:
            return f"[{len(events)} events compacted]"

        # Extract trial statistics
        trial_numbers = []
        objectives = []
        failures = 0

        for e in trial_events:
            if "trial_number" in e.details:
                trial_numbers.append(e.details["trial_number"])
            if "objective" in e.details and e.details["objective"] is not None:
                objectives.append(e.details["objective"])
            if e.event_type == EventType.TRIAL_FAILED:
                failures += 1

        # "Best" is the minimum objective — assumes minimization; TODO confirm
        if trial_numbers and objectives:
            return (
                f"Trials {min(trial_numbers)}-{max(trial_numbers)}: "
                f"Best={min(objectives):.4g}, "
                f"Avg={sum(objectives)/len(objectives):.4g}, "
                f"Failures={failures}"
            )
        elif trial_numbers:
            return f"Trials {min(trial_numbers)}-{max(trial_numbers)} ({failures} failures)"
        else:
            return f"[{len(events)} events compacted]"

    def get_context_string(self, include_timestamps: bool = False) -> str:
        """
        Generate context string from events.

        Args:
            include_timestamps: Whether to include timestamps

        Returns:
            Formatted context string for LLM
        """
        lines = ["## Optimization History", ""]

        for event in self.events:
            timestamp = ""
            if include_timestamps:
                timestamp = f"[{event.timestamp.strftime('%H:%M:%S')}] "

            # One line per event, prefixed with a glyph by type/state
            if event.compacted:
                lines.append(f"📦 {timestamp}{event.summary}")
            elif event.event_type == EventType.ERROR:
                lines.append(f"❌ {timestamp}{event.summary}")
            elif event.event_type == EventType.WARNING:
                lines.append(f"⚠️ {timestamp}{event.summary}")
            elif event.event_type == EventType.MILESTONE:
                lines.append(f"🎯 {timestamp}{event.summary}")
            elif event.event_type == EventType.TRIAL_FAILED:
                lines.append(f"✗ {timestamp}{event.summary}")
            elif event.event_type == EventType.TRIAL_COMPLETE:
                lines.append(f"✓ {timestamp}{event.summary}")
            else:
                lines.append(f"- {timestamp}{event.summary}")

        return "\n".join(lines)

    def get_stats(self) -> Dict[str, Any]:
        """Get compaction statistics."""
        # Tally current (post-compaction) events by type
        event_counts = {}
        for event in self.events:
            etype = event.event_type.value
            event_counts[etype] = event_counts.get(etype, 0) + 1

        return {
            "total_events": len(self.events),
            "compaction_count": self.compaction_count,
            "events_by_type": event_counts,
            "error_events": event_counts.get("error", 0),
            "compacted_events": len([e for e in self.events if e.compacted]),
            "preserved_events": len([e for e in self.events if e.preserve]),
            "compaction_history": self._compaction_stats[-5:]  # Last 5
        }

    def get_recent_events(self, n: int = 10) -> List[ContextEvent]:
        """Get the n most recent events."""
        return self.events[-n:]

    def get_errors(self) -> List[ContextEvent]:
        """Get all error events."""
        return [e for e in self.events if e.event_type == EventType.ERROR]

    def clear(self) -> None:
        """Clear all events and reset state."""
        self.events = []
        self.compaction_count = 0
        self._compaction_stats = []

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "events": [e.to_dict() for e in self.events],
            "compaction_threshold": self.compaction_threshold,
            "keep_recent": self.keep_recent,
            "keep_errors": self.keep_errors,
            "compaction_count": self.compaction_count,
            "compaction_stats": self._compaction_stats
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "CompactionManager":
        """Create from dictionary (inverse of to_dict)."""
        manager = cls(
            compaction_threshold=data.get("compaction_threshold", 50),
            keep_recent=data.get("keep_recent", 20),
            keep_errors=data.get("keep_errors", True)
        )
        manager.events = [ContextEvent.from_dict(e) for e in data.get("events", [])]
        manager.compaction_count = data.get("compaction_count", 0)
        manager._compaction_stats = data.get("compaction_stats", [])
        return manager
|
||||||
|
|
||||||
|
|
||||||
|
class ContextBudgetManager:
    """
    Manages overall context budget across sessions.

    Tracks:
    - Token estimates for each context section
    - Recommendations for context reduction
    - Budget allocation warnings
    """

    # Approximate characters per token (rough heuristic)
    CHARS_PER_TOKEN = 4

    # Default budget allocation (tokens)
    DEFAULT_BUDGET = {
        "stable_prefix": 5000,
        "protocols": 10000,
        "playbook": 5000,
        "session_state": 2000,
        "conversation": 30000,
        "working_space": 48000,
        "total": 100000
    }

    def __init__(self, budget: Optional[Dict[str, int]] = None):
        """
        Initialize budget manager.

        Args:
            budget: Custom budget allocation (uses defaults if not provided)
        """
        self.budget = budget or self.DEFAULT_BUDGET.copy()
        self._current_usage: Dict[str, int] = dict.fromkeys(self.budget, 0)

    def estimate_tokens(self, text: str) -> int:
        """Estimate token count for text."""
        return len(text) // self.CHARS_PER_TOKEN

    def update_usage(self, section: str, text: str) -> Dict[str, Any]:
        """
        Record the current content of a section and report its budget status.

        Args:
            section: Budget section name
            text: Content of the section

        Returns:
            Usage status with warnings if over budget
        """
        token_count = self.estimate_tokens(text)
        self._current_usage[section] = token_count

        # Unknown sections can never be "over budget" (inf ceiling)
        over = token_count > self.budget.get(section, float('inf'))
        report = {
            "section": section,
            "tokens": token_count,
            "budget": self.budget.get(section, 0),
            "over_budget": over
        }

        if over:
            report["warning"] = f"{section} exceeds budget by {token_count - self.budget[section]} tokens"

        return report

    def get_total_usage(self) -> int:
        """Get total token usage across all sections."""
        return sum(self._current_usage.values())

    def get_status(self) -> Dict[str, Any]:
        """Get overall budget status, per-section breakdown, and recommendations."""
        used = self.get_total_usage()
        ceiling = self.budget.get("total", 100000)

        per_section: Dict[str, Any] = {}
        for section, limit in self.budget.items():
            if section == "total":
                continue
            consumed = self._current_usage.get(section, 0)
            per_section[section] = {
                "used": consumed,
                "budget": limit,
                "utilization": consumed / limit if limit > 0 else 0
            }

        return {
            "total_used": used,
            "total_budget": ceiling,
            "utilization": used / ceiling,
            "by_section": per_section,
            "recommendations": self._get_recommendations()
        }

    def _get_recommendations(self) -> List[str]:
        """Generate budget recommendations."""
        tips: List[str] = []
        used = self.get_total_usage()
        ceiling = self.budget.get("total", 100000)

        if used > ceiling * 0.9:
            tips.append("Context usage > 90%. Consider triggering compaction.")

        for section, consumed in self._current_usage.items():
            limit = self.budget.get(section, 0)
            if limit > 0 and consumed > limit:
                tips.append(
                    f"{section}: {consumed - limit} tokens over budget. Reduce content."
                )

        return tips or ["Budget healthy."]
|
||||||
378
optimization_engine/context/feedback_loop.py
Normal file
378
optimization_engine/context/feedback_loop.py
Normal file
@@ -0,0 +1,378 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Feedback Loop - Automated Learning from Execution
|
||||||
|
|
||||||
|
Part of the ACE (Agentic Context Engineering) implementation for Atomizer.
|
||||||
|
|
||||||
|
Connects optimization outcomes to playbook updates using the principle:
|
||||||
|
"Leverage natural execution feedback as the learning signal"
|
||||||
|
|
||||||
|
The feedback loop:
|
||||||
|
1. Observes trial outcomes (success/failure)
|
||||||
|
2. Tracks which playbook items were active during each trial
|
||||||
|
3. Updates helpful/harmful counts based on outcomes
|
||||||
|
4. Commits new insights from the reflector
|
||||||
|
|
||||||
|
This implements true self-improvement: the system gets better
|
||||||
|
at optimization over time by learning from its own execution.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Dict, Any, List, Optional
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime
|
||||||
|
import json
|
||||||
|
|
||||||
|
from .playbook import AtomizerPlaybook, InsightCategory
|
||||||
|
from .reflector import AtomizerReflector, OptimizationOutcome
|
||||||
|
|
||||||
|
|
||||||
|
class FeedbackLoop:
    """
    Automated feedback loop that learns from optimization runs.

    Key insight from ACE: Use execution feedback (success/failure)
    as the learning signal, not labeled data.

    Usage:
        feedback = FeedbackLoop(playbook_path)

        # After each trial
        feedback.process_trial_result(
            trial_number=42,
            success=True,
            objective_value=100.5,
            design_variables={"thickness": 1.5},
            context_items_used=["str-00001", "mis-00003"]
        )

        # After study completion
        result = feedback.finalize_study(study_stats)
        print(f"Added {result['insights_added']} insights")
    """

    def __init__(self, playbook_path: Path):
        """
        Initialize feedback loop with playbook path.

        Args:
            playbook_path: Path to the playbook JSON file
        """
        self.playbook_path = playbook_path
        # AtomizerPlaybook.load returns an empty playbook when the file
        # does not exist yet, so this never raises for a fresh study.
        self.playbook = AtomizerPlaybook.load(playbook_path)
        self.reflector = AtomizerReflector(self.playbook)

        # Track items used per trial for attribution (trial_number -> item IDs)
        self._trial_item_usage: Dict[int, List[str]] = {}

        # Track outcomes for batch analysis
        self._outcomes: List[OptimizationOutcome] = []

        # Statistics
        self._total_trials_processed = 0
        self._successful_trials = 0
        self._failed_trials = 0

    def process_trial_result(
        self,
        trial_number: int,
        success: bool,
        objective_value: float,
        design_variables: Dict[str, float],
        context_items_used: Optional[List[str]] = None,
        errors: Optional[List[str]] = None,
        extractor_used: str = "",
        duration_seconds: float = 0.0
    ) -> Dict[str, Any]:
        """
        Process a trial result and update playbook accordingly.

        This is the core learning mechanism:
        - If trial succeeded with certain playbook items -> increase helpful count
        - If trial failed with certain playbook items -> increase harmful count

        Args:
            trial_number: Trial number
            success: Whether the trial succeeded
            objective_value: Objective function value (0 if failed)
            design_variables: Design variable values used
            context_items_used: List of playbook item IDs in context
            errors: List of error messages (if any)
            extractor_used: Name of extractor used
            duration_seconds: Trial duration

        Returns:
            Dictionary with processing results
        """
        context_items_used = context_items_used or []
        errors = errors or []

        # Update statistics
        self._total_trials_processed += 1
        if success:
            self._successful_trials += 1
        else:
            self._failed_trials += 1

        # Track item usage for this trial (used later for attribution
        # in get_item_performance).
        self._trial_item_usage[trial_number] = context_items_used

        # Update playbook item scores based on outcome; record_outcome
        # returns False for unknown IDs, which are simply not counted.
        items_updated = 0
        for item_id in context_items_used:
            if self.playbook.record_outcome(item_id, helpful=success):
                items_updated += 1

        # Create outcome for reflection
        outcome = OptimizationOutcome(
            trial_number=trial_number,
            success=success,
            # Objective of a failed trial is meaningless -> stored as None.
            objective_value=objective_value if success else None,
            constraint_violations=[],
            solver_errors=errors,
            design_variables=design_variables,
            extractor_used=extractor_used,
            duration_seconds=duration_seconds
        )

        # Store outcome
        self._outcomes.append(outcome)

        # Reflect on outcome (insights stay pending until finalize_study)
        insights = self.reflector.analyze_trial(outcome)

        return {
            "trial_number": trial_number,
            "success": success,
            "items_updated": items_updated,
            "insights_extracted": len(insights)
        }

    def record_error(
        self,
        trial_number: int,
        error_type: str,
        error_message: str,
        context_items_used: Optional[List[str]] = None
    ) -> None:
        """
        Record an error for a trial.

        Separate from process_trial_result for cases where
        we want to record errors without full trial data.

        Args:
            trial_number: Trial number
            error_type: Classification of error
            error_message: Error details
            context_items_used: Playbook items that were active
        """
        context_items_used = context_items_used or []

        # Mark items as harmful
        for item_id in context_items_used:
            self.playbook.record_outcome(item_id, helpful=False)

        # Create insight about the error.
        # NOTE(review): this appends a raw dict to pending_insights; the
        # reflector also defines an InsightCandidate dataclass — confirm
        # commit_insights accepts both representations.
        self.reflector.pending_insights.append({
            "category": InsightCategory.MISTAKE,
            "content": f"{error_type}: {error_message[:200]}",
            "helpful": False,
            "trial": trial_number
        })

    def finalize_study(
        self,
        study_stats: Dict[str, Any],
        save_playbook: bool = True
    ) -> Dict[str, Any]:
        """
        Called when study completes. Commits insights and prunes playbook.

        Args:
            study_stats: Dictionary with study statistics:
                - name: Study name
                - total_trials: Total trials run
                - best_value: Best objective achieved
                - convergence_rate: Success rate (0.0-1.0)
                - method: Optimization method used
            save_playbook: Whether to save playbook to disk

        Returns:
            Dictionary with finalization results
        """
        # Analyze study-level patterns
        study_insights = self.reflector.analyze_study_completion(
            study_name=study_stats.get("name", "unknown"),
            total_trials=study_stats.get("total_trials", 0),
            best_value=study_stats.get("best_value", 0),
            convergence_rate=study_stats.get("convergence_rate", 0),
            method=study_stats.get("method", "")
        )

        # Commit all pending insights
        insights_added = self.reflector.commit_insights()

        # Prune consistently harmful items (net score <= -3)
        items_pruned = self.playbook.prune_harmful(threshold=-3)

        # Save updated playbook
        if save_playbook:
            self.playbook.save(self.playbook_path)

        return {
            "insights_added": insights_added,
            "items_pruned": items_pruned,
            "playbook_size": len(self.playbook.items),
            "playbook_version": self.playbook.version,
            "total_trials_processed": self._total_trials_processed,
            "successful_trials": self._successful_trials,
            "failed_trials": self._failed_trials,
            "success_rate": (
                self._successful_trials / self._total_trials_processed
                if self._total_trials_processed > 0 else 0
            )
        }

    def get_item_performance(self) -> Dict[str, Dict[str, Any]]:
        """
        Get performance metrics for all playbook items.

        Returns:
            Dictionary mapping item IDs to performance stats
        """
        performance = {}
        for item_id, item in self.playbook.items.items():
            # Attribute trials to this item via the per-trial usage log.
            trials_used_in = [
                trial for trial, items in self._trial_item_usage.items()
                if item_id in items
            ]
            performance[item_id] = {
                "helpful_count": item.helpful_count,
                "harmful_count": item.harmful_count,
                "net_score": item.net_score,
                "confidence": item.confidence,
                "trials_used_in": len(trials_used_in),
                "category": item.category.value,
                "content_preview": item.content[:100]
            }
        return performance

    def get_top_performers(self, n: int = 10) -> List[Dict[str, Any]]:
        """
        Get the top performing playbook items (highest net score first).

        Args:
            n: Number of top items to return

        Returns:
            List of item performance dictionaries
        """
        performance = self.get_item_performance()
        sorted_items = sorted(
            performance.items(),
            key=lambda x: x[1]["net_score"],
            reverse=True
        )
        return [
            {"id": item_id, **stats}
            for item_id, stats in sorted_items[:n]
        ]

    def get_worst_performers(self, n: int = 10) -> List[Dict[str, Any]]:
        """
        Get the worst performing playbook items (lowest net score first).

        Args:
            n: Number of worst items to return

        Returns:
            List of item performance dictionaries
        """
        performance = self.get_item_performance()
        sorted_items = sorted(
            performance.items(),
            key=lambda x: x[1]["net_score"]
        )
        return [
            {"id": item_id, **stats}
            for item_id, stats in sorted_items[:n]
        ]

    def get_statistics(self) -> Dict[str, Any]:
        """Get feedback loop statistics."""
        return {
            "total_trials_processed": self._total_trials_processed,
            "successful_trials": self._successful_trials,
            "failed_trials": self._failed_trials,
            "success_rate": (
                self._successful_trials / self._total_trials_processed
                if self._total_trials_processed > 0 else 0
            ),
            "playbook_items": len(self.playbook.items),
            "pending_insights": self.reflector.get_pending_count(),
            "outcomes_recorded": len(self._outcomes)
        }

    def export_learning_report(self, path: Path) -> None:
        """
        Export a detailed learning report as JSON.

        Args:
            path: Path to save the report (parent dirs are created)
        """
        report = {
            "generated_at": datetime.now().isoformat(),
            "statistics": self.get_statistics(),
            "top_performers": self.get_top_performers(20),
            "worst_performers": self.get_worst_performers(10),
            "playbook_stats": self.playbook.get_stats(),
            "outcomes_summary": {
                "total": len(self._outcomes),
                "by_success": {
                    "success": len([o for o in self._outcomes if o.success]),
                    "failure": len([o for o in self._outcomes if not o.success])
                }
            }
        }

        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2)

    def reset(self) -> None:
        """Reset the feedback loop state (keeps playbook)."""
        self._trial_item_usage = {}
        self._outcomes = []
        self._total_trials_processed = 0
        self._successful_trials = 0
        self._failed_trials = 0
        # Fresh reflector: discards any pending (uncommitted) insights.
        self.reflector = AtomizerReflector(self.playbook)
||||||
|
|
||||||
|
class FeedbackLoopFactory:
    """Factory for creating feedback loops."""

    @staticmethod
    def create_for_study(study_dir: Path) -> FeedbackLoop:
        """
        Create a feedback loop for a specific study.

        Args:
            study_dir: Path to study directory

        Returns:
            Configured FeedbackLoop whose playbook lives under the
            study's "3_results" folder.
        """
        return FeedbackLoop(study_dir / "3_results" / "playbook.json")

    @staticmethod
    def create_global() -> FeedbackLoop:
        """
        Create a feedback loop using the global playbook.

        Returns:
            FeedbackLoop backed by the repository-wide playbook path.
        """
        from pathlib import Path
        global_path = Path(__file__).parents[2] / "knowledge_base" / "playbook.json"
        return FeedbackLoop(global_path)
|
||||||
432
optimization_engine/context/playbook.py
Normal file
432
optimization_engine/context/playbook.py
Normal file
@@ -0,0 +1,432 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Playbook - Structured Knowledge Store
|
||||||
|
|
||||||
|
Part of the ACE (Agentic Context Engineering) implementation for Atomizer.
|
||||||
|
Based on ACE framework principles:
|
||||||
|
- Incremental delta updates (never rewrite wholesale)
|
||||||
|
- Helpful/harmful tracking for each insight
|
||||||
|
- Semantic deduplication
|
||||||
|
- Category-based organization
|
||||||
|
|
||||||
|
This module provides the core data structures for accumulating optimization
|
||||||
|
knowledge across sessions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import List, Dict, Optional, Any
|
||||||
|
from enum import Enum
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
|
||||||
|
class InsightCategory(Enum):
    """Categories for playbook insights.

    The short value string doubles as the item-ID prefix
    (e.g. "str-00001" for a STRATEGY insight).
    """
    STRATEGY = "str"       # Optimization strategies
    CALCULATION = "cal"    # Formulas and calculations
    MISTAKE = "mis"        # Common mistakes to avoid
    TOOL = "tool"          # Tool usage patterns
    DOMAIN = "dom"         # Domain-specific knowledge (FEA, NX)
    WORKFLOW = "wf"        # Workflow patterns
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class PlaybookItem:
    """
    Single insight in the playbook with helpful/harmful tracking.

    Each item accumulates feedback over time:
    - helpful_count: Times this insight led to success
    - harmful_count: Times this insight led to failure
    - net_score: helpful - harmful (used for ranking)
    - confidence: helpful / (helpful + harmful)
    """
    id: str
    category: InsightCategory
    content: str
    helpful_count: int = 0
    harmful_count: int = 0
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())
    last_used: Optional[str] = None
    source_trials: List[int] = field(default_factory=list)
    tags: List[str] = field(default_factory=list)

    @property
    def net_score(self) -> int:
        """Net helpfulness score (helpful - harmful)."""
        return self.helpful_count - self.harmful_count

    @property
    def confidence(self) -> float:
        """Confidence score (0.0-1.0) based on outcome ratio.

        Untested items (no recorded outcomes) report a neutral 0.5.
        """
        outcomes = self.helpful_count + self.harmful_count
        if not outcomes:
            return 0.5
        return self.helpful_count / outcomes

    def to_context_string(self) -> str:
        """Format for injection into LLM context."""
        return (
            f"[{self.id}] helpful={self.helpful_count} "
            f"harmful={self.harmful_count} :: {self.content}"
        )

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        payload: Dict[str, Any] = {
            "id": self.id,
            "category": self.category.value,
            "content": self.content,
            "helpful_count": self.helpful_count,
            "harmful_count": self.harmful_count,
            "created_at": self.created_at,
            "last_used": self.last_used,
            "source_trials": self.source_trials,
            "tags": self.tags,
        }
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "PlaybookItem":
        """Create from dictionary (missing optional keys get defaults)."""
        optional_defaults = {
            "helpful_count": 0,
            "harmful_count": 0,
            "created_at": "",
            "last_used": None,
            "source_trials": [],
            "tags": [],
        }
        extras = {key: data.get(key, default)
                  for key, default in optional_defaults.items()}
        return cls(
            id=data["id"],
            category=InsightCategory(data["category"]),
            content=data["content"],
            **extras,
        )
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AtomizerPlaybook:
    """
    Evolving playbook that accumulates optimization knowledge.

    Based on ACE framework principles:
    - Incremental delta updates (never rewrite wholesale)
    - Helpful/harmful tracking for each insight
    - Semantic deduplication
    - Category-based organization

    Usage:
        playbook = AtomizerPlaybook.load(path)
        item = playbook.add_insight(InsightCategory.STRATEGY, "Use shell elements for thin walls")
        playbook.record_outcome(item.id, helpful=True)
        playbook.save(path)
    """
    items: Dict[str, PlaybookItem] = field(default_factory=dict)
    version: int = 1
    last_updated: str = field(default_factory=lambda: datetime.now().isoformat())

    def _generate_id(self, category: InsightCategory) -> str:
        """Generate a unique ID for a new item (e.g. "str-00004").

        BUGFIX: uses max(existing numeric suffix) + 1 instead of a count of
        existing IDs. With the count-based scheme, after prune_harmful()
        removed items the next ID could collide with a surviving item and
        silently overwrite it.
        """
        prefix = f"{category.value}-"
        max_num = 0
        for key in self.items.keys():
            if key.startswith(prefix):
                suffix = key[len(prefix):]
                if suffix.isdigit():
                    max_num = max(max_num, int(suffix))
        return f"{prefix}{max_num + 1:05d}"

    def _content_hash(self, content: str) -> str:
        """Generate hash for content deduplication (case/whitespace-insensitive)."""
        normalized = content.lower().strip()
        return hashlib.md5(normalized.encode()).hexdigest()[:12]

    def add_insight(
        self,
        category: InsightCategory,
        content: str,
        source_trial: Optional[int] = None,
        tags: Optional[List[str]] = None
    ) -> PlaybookItem:
        """
        Add new insight with delta update (ACE principle).

        Checks for semantic duplicates before adding.
        If duplicate found, increments helpful_count instead.

        Args:
            category: Type of insight
            content: The insight text
            source_trial: Trial number that generated this insight
            tags: Optional tags for filtering

        Returns:
            The created or updated PlaybookItem
        """
        content_hash = self._content_hash(content)

        # Check for near-duplicates (exact hash match after normalization).
        for item in self.items.values():
            existing_hash = self._content_hash(item.content)
            if content_hash == existing_hash:
                # Update existing instead of adding duplicate.
                # Re-observing an insight counts as weak positive evidence.
                item.helpful_count += 1
                # BUGFIX: "is not None" — trial number 0 is a valid trial
                # and was previously dropped by the truthiness test.
                if source_trial is not None and source_trial not in item.source_trials:
                    item.source_trials.append(source_trial)
                if tags:
                    item.tags = list(set(item.tags + tags))
                self.last_updated = datetime.now().isoformat()
                return item

        # Create new item
        item_id = self._generate_id(category)
        item = PlaybookItem(
            id=item_id,
            category=category,
            content=content,
            # BUGFIX: "is not None" — keep trial 0 (see above).
            source_trials=[source_trial] if source_trial is not None else [],
            tags=tags or []
        )
        self.items[item_id] = item
        self.last_updated = datetime.now().isoformat()
        self.version += 1
        return item

    def record_outcome(self, item_id: str, helpful: bool) -> bool:
        """
        Record whether using this insight was helpful or harmful.

        Args:
            item_id: The playbook item ID
            helpful: True if outcome was positive, False if negative

        Returns:
            True if item was found and updated, False otherwise
        """
        if item_id not in self.items:
            return False

        if helpful:
            self.items[item_id].helpful_count += 1
        else:
            self.items[item_id].harmful_count += 1
        self.items[item_id].last_used = datetime.now().isoformat()
        self.last_updated = datetime.now().isoformat()
        return True

    def get_context_for_task(
        self,
        task_type: str,
        max_items: int = 20,
        min_confidence: float = 0.5,
        tags: Optional[List[str]] = None
    ) -> str:
        """
        Generate context string for LLM consumption.

        Filters by confidence (and optional tags), sorted by net score.

        Args:
            task_type: Type of task (reserved for future filtering;
                currently not used in the selection logic)
            max_items: Maximum items to include
            min_confidence: Minimum confidence threshold
            tags: Optional tags to filter by

        Returns:
            Formatted context string for LLM
        """
        relevant_items = [
            item for item in self.items.values()
            if item.confidence >= min_confidence
        ]

        # Filter by tags if provided (any overlap qualifies).
        if tags:
            relevant_items = [
                item for item in relevant_items
                if any(tag in item.tags for tag in tags)
            ]

        # Sort by net score (most helpful first)
        relevant_items.sort(key=lambda x: x.net_score, reverse=True)

        # Group by category, preserving score order within each section.
        sections: Dict[str, List[str]] = {}
        for item in relevant_items[:max_items]:
            cat_name = item.category.name
            if cat_name not in sections:
                sections[cat_name] = []
            sections[cat_name].append(item.to_context_string())

        # Build context string
        lines = ["## Atomizer Knowledge Playbook", ""]
        for cat_name, items in sections.items():
            lines.append(f"### {cat_name}")
            lines.extend(items)
            lines.append("")

        return "\n".join(lines)

    def search_by_content(
        self,
        query: str,
        category: Optional[InsightCategory] = None,
        limit: int = 5
    ) -> List[PlaybookItem]:
        """
        Search playbook items by content similarity.

        Simple keyword matching - could be enhanced with embeddings.

        Args:
            query: Search query
            category: Optional category filter
            limit: Maximum results

        Returns:
            List of matching items sorted by relevance (word overlap,
            with a bonus for an exact substring match; ties broken by
            net score)
        """
        query_lower = query.lower()
        query_words = set(query_lower.split())

        scored_items = []
        for item in self.items.values():
            if category and item.category != category:
                continue

            content_lower = item.content.lower()
            content_words = set(content_lower.split())

            # Simple word overlap scoring
            overlap = len(query_words & content_words)
            if overlap > 0 or query_lower in content_lower:
                score = overlap + (1 if query_lower in content_lower else 0)
                scored_items.append((score, item))

        scored_items.sort(key=lambda x: (-x[0], -x[1].net_score))
        return [item for _, item in scored_items[:limit]]

    def get_by_category(
        self,
        category: InsightCategory,
        min_score: int = 0
    ) -> List[PlaybookItem]:
        """Get all items in a category with minimum net score."""
        return [
            item for item in self.items.values()
            if item.category == category and item.net_score >= min_score
        ]

    def prune_harmful(self, threshold: int = -3) -> int:
        """
        Remove items that have proven consistently harmful.

        Args:
            threshold: Net score threshold (items at or below are removed)

        Returns:
            Number of items removed
        """
        to_remove = [
            item_id for item_id, item in self.items.items()
            if item.net_score <= threshold
        ]
        for item_id in to_remove:
            del self.items[item_id]

        if to_remove:
            self.last_updated = datetime.now().isoformat()
            self.version += 1

        return len(to_remove)

    def get_stats(self) -> Dict[str, Any]:
        """Get playbook statistics (counts per category, score summary)."""
        by_category: Dict[str, int] = {}
        for item in self.items.values():
            cat = item.category.name
            if cat not in by_category:
                by_category[cat] = 0
            by_category[cat] += 1

        scores = [item.net_score for item in self.items.values()]

        return {
            "total_items": len(self.items),
            "by_category": by_category,
            "version": self.version,
            "last_updated": self.last_updated,
            "avg_score": sum(scores) / len(scores) if scores else 0,
            "max_score": max(scores) if scores else 0,
            "min_score": min(scores) if scores else 0
        }

    def save(self, path: Path) -> None:
        """
        Persist playbook to JSON (parent directories are created).

        Args:
            path: File path to save to
        """
        data = {
            "version": self.version,
            "last_updated": self.last_updated,
            "items": {k: v.to_dict() for k, v in self.items.items()}
        }
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)

    @classmethod
    def load(cls, path: Path) -> "AtomizerPlaybook":
        """
        Load playbook from JSON.

        Args:
            path: File path to load from

        Returns:
            Loaded playbook (or new empty playbook if file doesn't exist)
        """
        if not path.exists():
            return cls()

        with open(path, encoding='utf-8') as f:
            data = json.load(f)

        playbook = cls(
            version=data.get("version", 1),
            last_updated=data.get("last_updated", datetime.now().isoformat())
        )

        for item_data in data.get("items", {}).values():
            item = PlaybookItem.from_dict(item_data)
            playbook.items[item.id] = item

        return playbook
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience function for global playbook access
|
||||||
|
_global_playbook: Optional[AtomizerPlaybook] = None
|
||||||
|
_global_playbook_path: Optional[Path] = None
|
||||||
|
|
||||||
|
|
||||||
|
def get_playbook(path: Optional[Path] = None) -> AtomizerPlaybook:
    """
    Get the global playbook instance, loading it on first access.

    Args:
        path: Optional path to load from (uses default if not provided)

    Returns:
        The global AtomizerPlaybook instance
    """
    global _global_playbook, _global_playbook_path

    # Fall back to the repository-wide default location.
    resolved = path
    if resolved is None:
        resolved = Path(__file__).parents[2] / "knowledge_base" / "playbook.json"

    # (Re)load when nothing is cached yet or the requested path changed.
    cache_stale = _global_playbook is None or _global_playbook_path != resolved
    if cache_stale:
        _global_playbook = AtomizerPlaybook.load(resolved)
        _global_playbook_path = resolved

    return _global_playbook
|
||||||
|
|
||||||
|
|
||||||
|
def save_playbook() -> None:
    """Save the global playbook to its path (no-op when nothing is loaded)."""
    global _global_playbook, _global_playbook_path

    if _global_playbook is None or _global_playbook_path is None:
        return
    _global_playbook.save(_global_playbook_path)
|
||||||
467
optimization_engine/context/reflector.py
Normal file
467
optimization_engine/context/reflector.py
Normal file
@@ -0,0 +1,467 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Reflector - Optimization Outcome Analysis
|
||||||
|
|
||||||
|
Part of the ACE (Agentic Context Engineering) implementation for Atomizer.
|
||||||
|
|
||||||
|
The Reflector analyzes optimization outcomes to extract actionable insights:
|
||||||
|
- Examines successful and failed trials
|
||||||
|
- Extracts patterns that led to success/failure
|
||||||
|
- Formats insights for Curator (Playbook) integration
|
||||||
|
|
||||||
|
This implements the "Reflector" role from the ACE framework's
|
||||||
|
Generator -> Reflector -> Curator pipeline.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Dict, Any, List, Optional
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .playbook import AtomizerPlaybook, InsightCategory, PlaybookItem
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class OptimizationOutcome:
    """
    Captured outcome from an optimization trial.

    Contains all information needed to analyze what happened
    and extract insights for the playbook.
    """
    trial_number: int
    success: bool
    objective_value: Optional[float]
    constraint_violations: List[str] = field(default_factory=list)
    solver_errors: List[str] = field(default_factory=list)
    design_variables: Dict[str, float] = field(default_factory=dict)
    extractor_used: str = ""
    duration_seconds: float = 0.0
    notes: str = ""
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())

    # Optional metadata
    solver_type: str = ""
    mesh_info: Dict[str, Any] = field(default_factory=dict)
    convergence_info: Dict[str, Any] = field(default_factory=dict)

    # Serialization order of keys (kept stable for downstream consumers).
    _FIELD_ORDER = (
        "trial_number",
        "success",
        "objective_value",
        "constraint_violations",
        "solver_errors",
        "design_variables",
        "extractor_used",
        "duration_seconds",
        "notes",
        "timestamp",
        "solver_type",
        "mesh_info",
        "convergence_info",
    )

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {name: getattr(self, name) for name in self._FIELD_ORDER}
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class InsightCandidate:
    """
    A candidate insight extracted from trial analysis.

    Not yet committed to playbook - pending review/aggregation.
    Candidates are accumulated by AtomizerReflector and only written to the
    playbook by commit_insights(), which filters on `confidence`.
    """
    # Playbook category this insight would be filed under.
    category: InsightCategory
    # Human-readable insight text.
    content: str
    # True when the insight captures a winning pattern; False for a mistake.
    helpful: bool
    # Trial that produced the insight (None for study-level insights).
    trial_number: Optional[int] = None
    # Extraction confidence (observed values 0.5-0.9); compared against the
    # min_confidence threshold when committing.
    confidence: float = 0.5
    # Free-form tags for later search/aggregation.
    tags: List[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
class AtomizerReflector:
    """
    Analyzes optimization outcomes and extracts actionable insights.

    Implements the Reflector role from ACE framework:
    - Examines successful and failed trials
    - Extracts patterns that led to success/failure
    - Formats insights for Curator integration

    Usage:
        playbook = AtomizerPlaybook.load(path)
        reflector = AtomizerReflector(playbook)

        # After each trial
        reflector.analyze_trial(outcome)

        # After study completion
        reflector.analyze_study_completion(stats)

        # Commit insights to playbook
        count = reflector.commit_insights()
        playbook.save(path)
    """

    # Error pattern matchers for insight extraction.
    # Keys are error classes; values are regex fragments matched against the
    # lowercased error message. Classes are tested in dict order and the first
    # matching class wins, so earlier entries take precedence (e.g. a message
    # mentioning both "mesh" and "memory" is classified as "mesh").
    ERROR_PATTERNS = {
        "convergence": [
            r"convergence",
            r"did not converge",
            r"iteration limit",
            r"max iterations"
        ],
        "mesh": [
            r"mesh",
            r"element",
            r"distorted",
            r"jacobian",
            r"negative volume"
        ],
        "singularity": [
            r"singular",
            r"matrix",
            r"ill-conditioned",
            r"pivot"
        ],
        "memory": [
            r"memory",
            r"allocation",
            r"out of memory",
            r"insufficient"
        ],
        "license": [
            r"license",
            r"checkout",
            r"unavailable"
        ],
        "boundary": [
            r"boundary",
            r"constraint",
            r"spc",
            r"load"
        ]
    }

    def __init__(self, playbook: AtomizerPlaybook):
        """
        Initialize reflector with target playbook.

        Args:
            playbook: The playbook to add insights to
        """
        self.playbook = playbook
        # Insights extracted so far but not yet committed via commit_insights().
        self.pending_insights: List[InsightCandidate] = []
        # Trial numbers that have passed through analyze_trial().
        self.analyzed_trials: List[int] = []

    def analyze_trial(self, outcome: OptimizationOutcome) -> List[InsightCandidate]:
        """
        Analyze a single trial outcome and extract insights.

        Returns list of insight candidates (not yet added to playbook).
        Candidates are also appended to ``self.pending_insights`` for a later
        commit_insights() call.

        Args:
            outcome: The trial outcome to analyze

        Returns:
            List of extracted insight candidates
        """
        insights = []
        self.analyzed_trials.append(outcome.trial_number)

        # Analyze solver errors
        for error in outcome.solver_errors:
            error_insights = self._analyze_error(error, outcome)
            insights.extend(error_insights)

        # Analyze constraint violations
        for violation in outcome.constraint_violations:
            insights.append(InsightCandidate(
                category=InsightCategory.MISTAKE,
                content=f"Constraint violation: {violation}",
                helpful=False,
                trial_number=outcome.trial_number,
                tags=["constraint", "violation"]
            ))

        # Analyze successful patterns
        if outcome.success and outcome.objective_value is not None:
            success_insights = self._analyze_success(outcome)
            insights.extend(success_insights)

        # Analyze duration (performance insights)
        if outcome.duration_seconds > 0:
            perf_insights = self._analyze_performance(outcome)
            insights.extend(perf_insights)

        self.pending_insights.extend(insights)
        return insights

    def _analyze_error(
        self,
        error: str,
        outcome: OptimizationOutcome
    ) -> List[InsightCandidate]:
        """Analyze a solver error and extract relevant insights.

        Classifies the error with ERROR_PATTERNS, then emits one
        category-specific insight (or a generic one for unknown errors).
        """
        insights = []
        error_lower = error.lower()

        # Classify error type: first ERROR_PATTERNS class with any regex hit.
        error_type = "unknown"
        for etype, patterns in self.ERROR_PATTERNS.items():
            if any(re.search(p, error_lower) for p in patterns):
                error_type = etype
                break

        # Generate insight based on error type
        if error_type == "convergence":
            config_summary = self._summarize_config(outcome)
            insights.append(InsightCandidate(
                category=InsightCategory.MISTAKE,
                content=f"Convergence failure with {config_summary}. Consider relaxing solver tolerances or reviewing mesh quality.",
                helpful=False,
                trial_number=outcome.trial_number,
                confidence=0.7,
                tags=["convergence", "solver", error_type]
            ))

        elif error_type == "mesh":
            insights.append(InsightCandidate(
                category=InsightCategory.MISTAKE,
                # Error text is truncated to keep playbook entries short.
                content=f"Mesh-related error: {error[:100]}. Review element quality and mesh density.",
                helpful=False,
                trial_number=outcome.trial_number,
                confidence=0.8,
                tags=["mesh", "element", error_type]
            ))

        elif error_type == "singularity":
            insights.append(InsightCandidate(
                category=InsightCategory.MISTAKE,
                content=f"Matrix singularity detected. Check boundary conditions and constraints for rigid body modes.",
                helpful=False,
                trial_number=outcome.trial_number,
                confidence=0.9,
                tags=["singularity", "boundary", error_type]
            ))

        elif error_type == "memory":
            insights.append(InsightCandidate(
                # Filed as TOOL (not MISTAKE): the remedy is a tooling/solver
                # configuration change rather than a modeling error.
                category=InsightCategory.TOOL,
                content=f"Memory allocation failure. Consider reducing mesh density or using out-of-core solver.",
                helpful=False,
                trial_number=outcome.trial_number,
                confidence=0.8,
                tags=["memory", "performance", error_type]
            ))

        else:
            # Generic error insight ("license", "boundary", and "unknown"
            # classes all fall through to this catch-all).
            insights.append(InsightCandidate(
                category=InsightCategory.MISTAKE,
                content=f"Solver error: {error[:150]}",
                helpful=False,
                trial_number=outcome.trial_number,
                confidence=0.5,
                tags=["error", error_type]
            ))

        return insights

    def _analyze_success(self, outcome: OptimizationOutcome) -> List[InsightCandidate]:
        """Analyze successful trial and extract winning patterns."""
        insights = []

        # Record successful design variable ranges
        design_summary = self._summarize_design(outcome)
        insights.append(InsightCandidate(
            category=InsightCategory.STRATEGY,
            content=f"Successful design: {design_summary}",
            helpful=True,
            trial_number=outcome.trial_number,
            confidence=0.6,
            tags=["success", "design"]
        ))

        # Record extractor performance if fast (< 60 s wall time)
        if outcome.duration_seconds > 0 and outcome.duration_seconds < 60:
            insights.append(InsightCandidate(
                category=InsightCategory.TOOL,
                content=f"Fast solve ({outcome.duration_seconds:.1f}s) using {outcome.extractor_used}",
                helpful=True,
                trial_number=outcome.trial_number,
                confidence=0.5,
                tags=["performance", "extractor"]
            ))

        return insights

    def _analyze_performance(self, outcome: OptimizationOutcome) -> List[InsightCandidate]:
        """Analyze performance characteristics."""
        insights = []

        # Flag very slow trials
        if outcome.duration_seconds > 300:  # > 5 minutes
            insights.append(InsightCandidate(
                category=InsightCategory.TOOL,
                content=f"Slow trial ({outcome.duration_seconds/60:.1f} min). Consider mesh refinement or solver settings.",
                helpful=False,
                trial_number=outcome.trial_number,
                confidence=0.6,
                tags=["performance", "slow"]
            ))

        return insights

    def analyze_study_completion(
        self,
        study_name: str,
        total_trials: int,
        best_value: float,
        convergence_rate: float,
        method: str = ""
    ) -> List[InsightCandidate]:
        """
        Analyze completed study and extract high-level insights.

        Args:
            study_name: Name of the completed study
            total_trials: Total number of trials run
            best_value: Best objective value achieved
            convergence_rate: Fraction of trials that succeeded (0.0-1.0)
            method: Optimization method used

        Returns:
            List of study-level insight candidates

        NOTE(review): ``best_value`` is currently unused by this analysis;
        confirm whether it should feed into the generated insight text.
        """
        insights = []

        if convergence_rate > 0.9:
            insights.append(InsightCandidate(
                category=InsightCategory.STRATEGY,
                content=f"Study '{study_name}' achieved {convergence_rate:.0%} success rate - configuration is robust for similar problems.",
                helpful=True,
                confidence=0.8,
                tags=["study", "robust", "high_success"]
            ))
        elif convergence_rate < 0.5:
            insights.append(InsightCandidate(
                category=InsightCategory.MISTAKE,
                content=f"Study '{study_name}' had only {convergence_rate:.0%} success rate - review mesh quality and solver settings.",
                helpful=False,
                confidence=0.8,
                tags=["study", "low_success", "needs_review"]
            ))

        # Method-specific insights: only endorse a method once there is a
        # reasonable sample size (> 20 trials) and a high success rate.
        if method and total_trials > 20:
            if convergence_rate > 0.8:
                insights.append(InsightCandidate(
                    category=InsightCategory.STRATEGY,
                    content=f"{method} performed well on '{study_name}' ({convergence_rate:.0%} success, {total_trials} trials).",
                    helpful=True,
                    confidence=0.7,
                    tags=["method", method.lower(), "performance"]
                ))

        self.pending_insights.extend(insights)
        return insights

    def commit_insights(self, min_confidence: float = 0.0) -> int:
        """
        Commit pending insights to playbook (Curator handoff).

        Aggregates similar insights and adds to playbook with
        appropriate helpful/harmful counts.

        Args:
            min_confidence: Minimum confidence threshold to commit

        Returns:
            Number of insights added to playbook
        """
        count = 0

        for insight in self.pending_insights:
            if insight.confidence < min_confidence:
                continue

            item = self.playbook.add_insight(
                category=insight.category,
                content=insight.content,
                source_trial=insight.trial_number,
                tags=insight.tags
            )

            # Record initial outcome based on insight nature.
            # NOTE(review): only harmful insights get an explicit outcome;
            # helpful ones rely on add_insight's defaults — confirm this
            # asymmetry is intended by the playbook's scoring scheme.
            if not insight.helpful:
                self.playbook.record_outcome(item.id, helpful=False)

            count += 1

        # All pending candidates are consumed, including any that were
        # skipped by the confidence filter.
        self.pending_insights = []
        return count

    def get_pending_count(self) -> int:
        """Get number of pending insights."""
        return len(self.pending_insights)

    def clear_pending(self) -> None:
        """Clear pending insights without committing."""
        self.pending_insights = []

    def _summarize_config(self, outcome: OptimizationOutcome) -> str:
        """Create brief config summary for error context.

        Example: "extractor=nastran, vars=4, solver=SOL101".
        """
        parts = []
        if outcome.extractor_used:
            parts.append(f"extractor={outcome.extractor_used}")
        parts.append(f"vars={len(outcome.design_variables)}")
        if outcome.solver_type:
            parts.append(f"solver={outcome.solver_type}")
        return ", ".join(parts)

    def _summarize_design(self, outcome: OptimizationOutcome) -> str:
        """Create brief design summary.

        Includes the objective (when present) and at most three design
        variables, with a "(+N more)" suffix for the remainder.
        """
        parts = []
        if outcome.objective_value is not None:
            parts.append(f"obj={outcome.objective_value:.4g}")

        # Include up to 3 design variables
        var_items = list(outcome.design_variables.items())[:3]
        for k, v in var_items:
            parts.append(f"{k}={v:.3g}")

        if len(outcome.design_variables) > 3:
            parts.append(f"(+{len(outcome.design_variables)-3} more)")

        return ", ".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
class ReflectorFactory:
    """Factory helpers that build configured AtomizerReflector instances."""

    @staticmethod
    def create_for_study(study_dir: Path) -> AtomizerReflector:
        """
        Create a reflector whose playbook lives inside a study directory.

        Args:
            study_dir: Path to the study directory

        Returns:
            Configured AtomizerReflector
        """
        study_playbook = AtomizerPlaybook.load(
            study_dir / "3_results" / "playbook.json"
        )
        return AtomizerReflector(study_playbook)

    @staticmethod
    def create_global() -> AtomizerReflector:
        """
        Create a reflector backed by the shared global playbook.

        Returns:
            AtomizerReflector using global playbook
        """
        # Imported lazily to avoid a circular import at module load time.
        from .playbook import get_playbook

        return AtomizerReflector(get_playbook())
|
||||||
531
optimization_engine/context/runner_integration.py
Normal file
531
optimization_engine/context/runner_integration.py
Normal file
@@ -0,0 +1,531 @@
|
|||||||
|
"""
|
||||||
|
Context Engineering Integration for OptimizationRunner
|
||||||
|
|
||||||
|
Provides integration between the context engineering system and the
|
||||||
|
OptimizationRunner without modifying the core runner code.
|
||||||
|
|
||||||
|
Two approaches are provided:
|
||||||
|
1. ContextEngineeringMixin - Mix into OptimizationRunner subclass
|
||||||
|
2. ContextAwareRunner - Wrapper that adds context engineering
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
# Approach 1: Mixin
|
||||||
|
class MyRunner(ContextEngineeringMixin, OptimizationRunner):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Approach 2: Wrapper
|
||||||
|
runner = OptimizationRunner(...)
|
||||||
|
context_runner = ContextAwareRunner(runner, playbook_path)
|
||||||
|
context_runner.run(...)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Dict, Any, Optional, List, Callable
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime
|
||||||
|
import time
|
||||||
|
|
||||||
|
from .playbook import AtomizerPlaybook, get_playbook
|
||||||
|
from .reflector import AtomizerReflector, OptimizationOutcome
|
||||||
|
from .feedback_loop import FeedbackLoop
|
||||||
|
from .compaction import CompactionManager, EventType
|
||||||
|
from .session_state import AtomizerSessionState, TaskType, get_session
|
||||||
|
|
||||||
|
|
||||||
|
class ContextEngineeringMixin:
    """
    Mixin class to add context engineering to OptimizationRunner.

    Provides:
    - Automatic playbook loading/saving
    - Trial outcome reflection
    - Learning from successes/failures
    - Session state tracking

    Usage:
        class MyContextAwareRunner(ContextEngineeringMixin, OptimizationRunner):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.init_context_engineering()
    """

    def init_context_engineering(
        self,
        playbook_path: Optional[Path] = None,
        enable_compaction: bool = True,
        compaction_threshold: int = 50
    ) -> None:
        """
        Initialize context engineering components.

        Call this in your subclass __init__ after super().__init__().

        Args:
            playbook_path: Path to playbook JSON (default: output_dir/playbook.json)
            enable_compaction: Whether to enable context compaction
            compaction_threshold: Number of events before compaction
        """
        # Determine playbook path; falls back to the current directory when
        # the host runner has no output_dir attribute.
        if playbook_path is None:
            playbook_path = getattr(self, 'output_dir', Path('.')) / 'playbook.json'

        self._playbook_path = Path(playbook_path)
        self._playbook = AtomizerPlaybook.load(self._playbook_path)
        self._reflector = AtomizerReflector(self._playbook)
        self._feedback_loop = FeedbackLoop(self._playbook_path)

        # Initialize compaction if enabled
        self._enable_compaction = enable_compaction
        if enable_compaction:
            self._compaction_manager = CompactionManager(
                compaction_threshold=compaction_threshold,
                keep_recent=20,
                keep_errors=True
            )
        else:
            self._compaction_manager = None

        # Session state
        self._session = get_session()
        self._session.exposed.task_type = TaskType.RUN_OPTIMIZATION

        # Track active playbook items for feedback attribution
        self._active_playbook_items: List[str] = []

        # Statistics
        self._context_stats = {
            "trials_processed": 0,
            "insights_generated": 0,
            "errors_captured": 0
        }

    def get_relevant_playbook_items(self, max_items: int = 15) -> List[str]:
        """
        Get relevant playbook items for current optimization context.

        Args:
            max_items: Maximum number of playbook items to include

        Returns:
            List of playbook item context strings (one per line of the
            formatted playbook context)
        """
        context = self._playbook.get_context_for_task(
            task_type="optimization",
            max_items=max_items,
            min_confidence=0.5
        )

        # Extract item IDs for feedback tracking.
        # NOTE(review): this records the first max_items of *all* playbook
        # items, not necessarily the ones selected into `context` above —
        # confirm whether attribution should use the selected subset.
        self._active_playbook_items = [
            item.id for item in self._playbook.items.values()
        ][:max_items]

        return context.split('\n')

    def record_trial_start(self, trial_number: int, design_vars: Dict[str, float]) -> None:
        """
        Record the start of a trial for context tracking.

        Args:
            trial_number: Trial number
            design_vars: Design variable values
        """
        if self._compaction_manager:
            # Construct the event via ContextEvent directly (as
            # ContextAwareRunner._wrapped_objective does). The previous code
            # instantiated `self._compaction_manager.events.__class__`, which
            # is the events *container* type, not an event, and raised
            # TypeError when called with event keyword arguments.
            from .compaction import ContextEvent
            self._compaction_manager.add_event(
                ContextEvent(
                    timestamp=datetime.now(),
                    event_type=EventType.TRIAL_START,
                    summary=f"Trial {trial_number} started",
                    details={"trial_number": trial_number, "design_vars": design_vars}
                )
            )

        self._session.add_action(f"Started trial {trial_number}")

    def record_trial_outcome(
        self,
        trial_number: int,
        success: bool,
        objective_value: Optional[float],
        design_vars: Dict[str, float],
        errors: Optional[List[str]] = None,
        duration_seconds: float = 0.0
    ) -> Dict[str, Any]:
        """
        Record the outcome of a trial for learning.

        Feeds the outcome through the compaction manager, the reflector
        (insight extraction) and the feedback loop, then updates the session
        state and running statistics.

        Args:
            trial_number: Trial number
            success: Whether trial succeeded
            objective_value: Objective value (None if failed)
            design_vars: Design variable values
            errors: List of error messages
            duration_seconds: Trial duration

        Returns:
            Dictionary with processing results
            ("insights_extracted", "playbook_items_updated")
        """
        errors = errors or []

        # Update compaction manager
        if self._compaction_manager:
            self._compaction_manager.add_trial_event(
                trial_number=trial_number,
                success=success,
                objective=objective_value,
                duration=duration_seconds
            )

        # Create outcome for reflection
        outcome = OptimizationOutcome(
            trial_number=trial_number,
            success=success,
            objective_value=objective_value,
            constraint_violations=[],
            solver_errors=errors,
            design_variables=design_vars,
            extractor_used=getattr(self, '_current_extractor', ''),
            duration_seconds=duration_seconds
        )

        # Analyze and generate insights
        insights = self._reflector.analyze_trial(outcome)

        # Process through feedback loop
        result = self._feedback_loop.process_trial_result(
            trial_number=trial_number,
            success=success,
            objective_value=objective_value or 0.0,
            design_variables=design_vars,
            context_items_used=self._active_playbook_items,
            errors=errors
        )

        # Update statistics
        self._context_stats["trials_processed"] += 1
        self._context_stats["insights_generated"] += len(insights)

        # Update session state
        if success:
            # Guard: a successful trial may still be reported without an
            # objective; formatting None with ':.4g' would raise TypeError.
            obj_text = (
                f"{objective_value:.4g}" if objective_value is not None else "n/a"
            )
            self._session.add_action(
                f"Trial {trial_number} succeeded: obj={obj_text}"
            )
        else:
            error_summary = errors[0][:50] if errors else "unknown"
            self._session.add_error(f"Trial {trial_number}: {error_summary}")
            self._context_stats["errors_captured"] += 1

        return {
            "insights_extracted": len(insights),
            "playbook_items_updated": result.get("items_updated", 0)
        }

    def record_error(self, error_message: str, error_type: str = "") -> None:
        """
        Record an error for learning (outside trial context).

        Args:
            error_message: Error description
            error_type: Error classification
        """
        if self._compaction_manager:
            self._compaction_manager.add_error_event(error_message, error_type)

        self._session.add_error(error_message, error_type)
        self._context_stats["errors_captured"] += 1

    def finalize_context_engineering(self, study_stats: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        Finalize context engineering at end of optimization.

        Commits insights and saves playbook.

        Args:
            study_stats: Optional study statistics for analysis

        Returns:
            Dictionary with finalization results
        """
        if study_stats is None:
            # NOTE(review): assumes `self.study` is dict-like; an
            # object-style study attribute would make `.get` fail — confirm
            # against the host runner.
            study_stats = {
                "name": getattr(self, 'study', {}).get('study_name', 'unknown'),
                "total_trials": self._context_stats["trials_processed"],
                "best_value": getattr(self, 'best_value', 0),
                "convergence_rate": 0.8  # Would need actual calculation
            }

        # Finalize feedback loop
        result = self._feedback_loop.finalize_study(study_stats)

        # Save playbook
        self._playbook.save(self._playbook_path)

        # Add compaction stats
        if self._compaction_manager:
            result["compaction_stats"] = self._compaction_manager.get_stats()

        result["context_stats"] = self._context_stats

        return result

    def get_context_string(self) -> str:
        """
        Get full context string for LLM consumption.

        Joins session state, relevant playbook items and (when enabled)
        compaction history with "---" separators.

        Returns:
            Formatted context string
        """
        parts = []

        # Session state
        parts.append(self._session.get_llm_context())

        # Playbook items
        playbook_context = self._playbook.get_context_for_task(
            task_type="optimization",
            max_items=15
        )
        if playbook_context:
            parts.append(playbook_context)

        # Compaction history
        if self._compaction_manager:
            parts.append(self._compaction_manager.get_context_string())

        return "\n\n---\n\n".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
class ContextAwareRunner:
|
||||||
|
"""
|
||||||
|
Wrapper that adds context engineering to any OptimizationRunner.
|
||||||
|
|
||||||
|
This approach doesn't require subclassing - it wraps an existing
|
||||||
|
runner instance and intercepts relevant calls.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
runner = OptimizationRunner(...)
|
||||||
|
context_runner = ContextAwareRunner(runner)
|
||||||
|
|
||||||
|
# Use context_runner.run() instead of runner.run()
|
||||||
|
study = context_runner.run(n_trials=50)
|
||||||
|
|
||||||
|
# Get learning report
|
||||||
|
report = context_runner.get_learning_report()
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
runner,
|
||||||
|
playbook_path: Optional[Path] = None,
|
||||||
|
enable_compaction: bool = True
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize context-aware wrapper.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
runner: OptimizationRunner instance to wrap
|
||||||
|
playbook_path: Path to playbook (default: runner's output_dir)
|
||||||
|
enable_compaction: Whether to enable context compaction
|
||||||
|
"""
|
||||||
|
self._runner = runner
|
||||||
|
|
||||||
|
# Determine playbook path
|
||||||
|
if playbook_path is None:
|
||||||
|
playbook_path = runner.output_dir / 'playbook.json'
|
||||||
|
|
||||||
|
self._playbook_path = Path(playbook_path)
|
||||||
|
self._playbook = AtomizerPlaybook.load(self._playbook_path)
|
||||||
|
self._reflector = AtomizerReflector(self._playbook)
|
||||||
|
self._feedback_loop = FeedbackLoop(self._playbook_path)
|
||||||
|
|
||||||
|
# Compaction
|
||||||
|
self._enable_compaction = enable_compaction
|
||||||
|
if enable_compaction:
|
||||||
|
self._compaction = CompactionManager(
|
||||||
|
compaction_threshold=50,
|
||||||
|
keep_recent=20
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self._compaction = None
|
||||||
|
|
||||||
|
# Session
|
||||||
|
self._session = get_session()
|
||||||
|
self._session.exposed.task_type = TaskType.RUN_OPTIMIZATION
|
||||||
|
|
||||||
|
# Statistics
|
||||||
|
self._stats = {
|
||||||
|
"trials_observed": 0,
|
||||||
|
"successful_trials": 0,
|
||||||
|
"failed_trials": 0,
|
||||||
|
"insights_generated": 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Hook into runner's objective function
|
||||||
|
self._original_objective = runner._objective_function
|
||||||
|
runner._objective_function = self._wrapped_objective
|
||||||
|
|
||||||
|
def _wrapped_objective(self, trial) -> float:
|
||||||
|
"""
|
||||||
|
Wrapped objective function that captures outcomes.
|
||||||
|
"""
|
||||||
|
start_time = time.time()
|
||||||
|
trial_number = trial.number
|
||||||
|
|
||||||
|
# Record trial start
|
||||||
|
if self._compaction:
|
||||||
|
from .compaction import ContextEvent
|
||||||
|
self._compaction.add_event(ContextEvent(
|
||||||
|
timestamp=datetime.now(),
|
||||||
|
event_type=EventType.TRIAL_START,
|
||||||
|
summary=f"Trial {trial_number} starting"
|
||||||
|
))
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Run original objective
|
||||||
|
result = self._original_objective(trial)
|
||||||
|
|
||||||
|
# Record success
|
||||||
|
duration = time.time() - start_time
|
||||||
|
self._record_success(trial_number, result, trial.params, duration)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
# Record failure
|
||||||
|
duration = time.time() - start_time
|
||||||
|
self._record_failure(trial_number, str(e), trial.params, duration)
|
||||||
|
raise
|
||||||
|
|
||||||
|
def _record_success(
|
||||||
|
self,
|
||||||
|
trial_number: int,
|
||||||
|
objective_value: float,
|
||||||
|
params: Dict[str, Any],
|
||||||
|
duration: float
|
||||||
|
) -> None:
|
||||||
|
"""Record successful trial."""
|
||||||
|
self._stats["trials_observed"] += 1
|
||||||
|
self._stats["successful_trials"] += 1
|
||||||
|
|
||||||
|
if self._compaction:
|
||||||
|
self._compaction.add_trial_event(
|
||||||
|
trial_number=trial_number,
|
||||||
|
success=True,
|
||||||
|
objective=objective_value,
|
||||||
|
duration=duration
|
||||||
|
)
|
||||||
|
|
||||||
|
# Process through feedback loop
|
||||||
|
self._feedback_loop.process_trial_result(
|
||||||
|
trial_number=trial_number,
|
||||||
|
success=True,
|
||||||
|
objective_value=objective_value,
|
||||||
|
design_variables=dict(params),
|
||||||
|
context_items_used=list(self._playbook.items.keys())[:10]
|
||||||
|
)
|
||||||
|
|
||||||
|
# Update session
|
||||||
|
self._session.add_action(f"Trial {trial_number}: obj={objective_value:.4g}")
|
||||||
|
|
||||||
|
def _record_failure(
    self,
    trial_number: int,
    error: str,
    params: Dict[str, Any],
    duration: float
) -> None:
    """Record a failed trial in stats, compaction log, and feedback loop.

    Args:
        trial_number: Optuna trial index.
        error: Stringified exception raised by the objective.
        params: Trial parameters (copied before handing off).
        duration: Wall-clock seconds before the failure.
    """
    for key in ("trials_observed", "failed_trials"):
        self._stats[key] += 1

    compaction = self._compaction
    if compaction:
        compaction.add_trial_event(
            trial_number=trial_number,
            success=False,
            duration=duration,
        )
        compaction.add_error_event(error, "trial_failure")

    # Propagate the failure into the learning loop; objective_value is a
    # placeholder 0.0 for failed trials.
    self._feedback_loop.process_trial_result(
        trial_number=trial_number,
        success=False,
        objective_value=0.0,
        design_variables=dict(params),
        errors=[error],
    )

    # Keep the session error log short: truncate the message to 100 chars.
    self._session.add_error(f"Trial {trial_number}: {error[:100]}")
|
||||||
|
|
||||||
|
def run(self, *args, **kwargs):
    """
    Run optimization with context engineering.

    Passes through to wrapped runner.run() with context tracking.
    The session's study status is kept in sync: "running" while the
    wrapped runner executes, "completed" via _finalize() on success,
    and "failed" if the run raises.

    Returns:
        Whatever the wrapped runner's run() returns.

    Raises:
        Whatever the wrapped runner raises (re-raised after recording).
    """
    # Update session state
    study_name = kwargs.get('study_name', 'unknown')
    self._session.exposed.study_name = study_name
    self._session.exposed.study_status = "running"

    try:
        # Run optimization
        result = self._runner.run(*args, **kwargs)

        # Finalize context engineering (sets status to "completed")
        self._finalize(study_name)

        return result

    except Exception as e:
        # Fix: previously the status stayed "running" after a crash,
        # so downstream context reported a dead study as active.
        self._session.exposed.study_status = "failed"
        self._session.add_error(f"Study failed: {str(e)}")
        raise
|
||||||
|
|
||||||
|
def _finalize(self, study_name: str) -> None:
    """Wrap up context engineering once the optimization run finishes.

    Args:
        study_name: Name of the study that just completed.
    """
    observed = self._stats["trials_observed"]
    succeeded = self._stats["successful_trials"]
    # Guard against division by zero when no trials ran.
    success_rate = succeeded / observed if observed > 0 else 0

    # Hand a summary of the run to the feedback loop for insight mining.
    summary = {
        "name": study_name,
        "total_trials": observed,
        "best_value": getattr(self._runner, 'best_value', 0),
        "convergence_rate": success_rate,
    }
    outcome = self._feedback_loop.finalize_study(summary)
    self._stats["insights_generated"] = outcome.get("insights_added", 0)

    # Reflect completion in the exposed session state.
    self._session.exposed.study_status = "completed"
    self._session.exposed.trials_completed = observed
|
||||||
|
|
||||||
|
def get_learning_report(self) -> Dict[str, Any]:
    """Summarize what the system learned during this run.

    Returns:
        Dict with raw counters, playbook/feedback statistics, the top
        ten insights, and compaction stats (None when compaction is off).
    """
    compaction_stats = None
    if self._compaction:
        compaction_stats = self._compaction.get_stats()

    report: Dict[str, Any] = {
        "statistics": self._stats,
        "playbook_size": len(self._playbook.items),
        "playbook_stats": self._playbook.get_stats(),
        "feedback_stats": self._feedback_loop.get_statistics(),
        "top_insights": self._feedback_loop.get_top_performers(10),
        "compaction_stats": compaction_stats,
    }
    return report
|
||||||
|
|
||||||
|
def get_context(self) -> str:
    """Assemble the current context string for the LLM.

    Concatenates the session summary, the compaction log (if enabled),
    and the task-relevant playbook excerpt, separated by "---" rules.
    """
    sections = [self._session.get_llm_context()]

    if self._compaction:
        sections.append(self._compaction.get_context_string())

    playbook_section = self._playbook.get_context_for_task("optimization")
    if playbook_section:
        sections.append(playbook_section)

    separator = "\n\n---\n\n"
    return separator.join(sections)
|
||||||
|
|
||||||
|
def __getattr__(self, name):
|
||||||
|
"""Delegate unknown attributes to wrapped runner."""
|
||||||
|
return getattr(self._runner, name)
|
||||||
463
optimization_engine/context/session_state.py
Normal file
463
optimization_engine/context/session_state.py
Normal file
@@ -0,0 +1,463 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Session State - Context Isolation Management
|
||||||
|
|
||||||
|
Part of the ACE (Agentic Context Engineering) implementation for Atomizer.
|
||||||
|
|
||||||
|
Implements the "Write-Select-Compress-Isolate" pattern:
|
||||||
|
- Exposed fields are sent to LLM at every turn
|
||||||
|
- Isolated fields are accessed selectively when needed
|
||||||
|
- Automatic compression of old data
|
||||||
|
|
||||||
|
This ensures efficient context usage while maintaining
|
||||||
|
access to full historical data when needed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Optional
|
||||||
|
|
||||||
|
|
||||||
|
class TaskType(Enum):
    """Types of tasks Claude can perform in Atomizer.

    Values are stable string identifiers used in serialized session state
    (see AtomizerSessionState.to_dict/load), so they must not be renamed.
    """
    CREATE_STUDY = "create_study"                # scaffold a new study
    RUN_OPTIMIZATION = "run_optimization"        # execute optimization trials
    MONITOR_PROGRESS = "monitor_progress"        # query a running study's status
    ANALYZE_RESULTS = "analyze_results"          # post-process finished trials
    DEBUG_ERROR = "debug_error"                  # diagnose failures
    CONFIGURE_SETTINGS = "configure_settings"    # edit configuration
    EXPORT_DATA = "export_data"                  # export results/artifacts
    NEURAL_ACCELERATION = "neural_acceleration"  # surrogate/NN-assisted runs
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ExposedState:
    """
    State exposed to LLM at every turn.

    Keep this minimal - only what's needed for immediate context.
    Everything here counts against token budget every turn.
    """

    # Current task context
    task_type: Optional[TaskType] = None
    current_objective: str = ""

    # Recent history (compressed)
    recent_actions: List[str] = field(default_factory=list)
    recent_errors: List[str] = field(default_factory=list)

    # Active study summary
    study_name: Optional[str] = None
    study_status: str = "unknown"
    trials_completed: int = 0
    trials_total: int = 0
    best_value: Optional[float] = None
    best_trial: Optional[int] = None

    # Playbook excerpt (most relevant items)
    active_playbook_items: List[str] = field(default_factory=list)

    # Constraints for context size.
    # Fix: declared ClassVar so the dataclass machinery treats these as
    # class-level constants. Previously (plain `MAX_ACTIONS: int = 10`)
    # they became per-instance init fields, leaking into __init__,
    # __eq__, and repr.
    MAX_ACTIONS: ClassVar[int] = 10
    MAX_ERRORS: ClassVar[int] = 5
    MAX_PLAYBOOK_ITEMS: ClassVar[int] = 15
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class IsolatedState:
    """
    State isolated from LLM - accessed selectively.

    This data is NOT included in every context window.
    Load specific fields when explicitly needed
    (see AtomizerSessionState.load_isolated_data).
    """

    # Full optimization history (can be large; persisted to a separate
    # .history.json file by AtomizerSessionState.save)
    full_trial_history: List[Dict[str, Any]] = field(default_factory=list)

    # NX session state (heavy, complex)
    nx_model_path: Optional[str] = None
    nx_expressions: Dict[str, Any] = field(default_factory=dict)
    nx_sim_path: Optional[str] = None

    # Neural network cache
    neural_predictions: Dict[str, float] = field(default_factory=dict)
    surrogate_model_path: Optional[str] = None

    # Full playbook (loaded on demand)
    full_playbook_path: Optional[str] = None

    # Debug information — raw artifacts from the most recent solver run
    last_solver_output: str = ""
    last_f06_content: str = ""
    last_solver_returncode: Optional[int] = None

    # Configuration snapshots
    optimization_config: Dict[str, Any] = field(default_factory=dict)
    study_config: Dict[str, Any] = field(default_factory=dict)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AtomizerSessionState:
    """
    Complete session state with exposure control.

    The exposed state is automatically injected into every LLM context.
    The isolated state is accessed only when explicitly needed.

    Usage:
        session = AtomizerSessionState(session_id="session_001")
        session.exposed.task_type = TaskType.CREATE_STUDY
        session.add_action("Created study directory")

        # Get context for LLM
        context = session.get_llm_context()

        # Access isolated data when needed
        f06 = session.load_isolated_data("last_f06_content")
    """

    session_id: str
    # Timestamps are ISO-8601 strings so the state serializes directly to JSON.
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())
    last_updated: str = field(default_factory=lambda: datetime.now().isoformat())

    exposed: ExposedState = field(default_factory=ExposedState)
    isolated: IsolatedState = field(default_factory=IsolatedState)

    def get_llm_context(self) -> str:
        """
        Generate context string for LLM consumption.

        Only includes exposed state - isolated state requires
        explicit access via load_isolated_data().

        Returns:
            Formatted markdown context string
        """
        lines = [
            "## Current Session State",
            "",
            f"**Task**: {self.exposed.task_type.value if self.exposed.task_type else 'Not set'}",
            f"**Objective**: {self.exposed.current_objective or 'None specified'}",
            "",
        ]

        # Study context
        if self.exposed.study_name:
            progress = ""
            if self.exposed.trials_total > 0:
                pct = (self.exposed.trials_completed / self.exposed.trials_total) * 100
                progress = f" ({pct:.0f}%)"

            lines.extend([
                f"### Active Study: {self.exposed.study_name}",
                f"- Status: {self.exposed.study_status}",
                f"- Trials: {self.exposed.trials_completed}/{self.exposed.trials_total}{progress}",
            ])

            if self.exposed.best_value is not None:
                lines.append(f"- Best: {self.exposed.best_value:.6g} (trial #{self.exposed.best_trial})")
            lines.append("")

        # Recent actions (only the last five, even though up to
        # MAX_ACTIONS are retained)
        if self.exposed.recent_actions:
            lines.append("### Recent Actions")
            for action in self.exposed.recent_actions[-5:]:
                lines.append(f"- {action}")
            lines.append("")

        # Recent errors (highlight these)
        if self.exposed.recent_errors:
            lines.append("### Recent Errors (address these)")
            for error in self.exposed.recent_errors:
                lines.append(f"- {error}")
            lines.append("")

        # Relevant playbook items
        if self.exposed.active_playbook_items:
            lines.append("### Relevant Knowledge")
            for item in self.exposed.active_playbook_items:
                lines.append(f"- {item}")
            lines.append("")

        return "\n".join(lines)

    def add_action(self, action: str) -> None:
        """
        Record an action (auto-compresses old actions).

        Args:
            action: Description of the action taken
        """
        timestamp = datetime.now().strftime("%H:%M:%S")
        self.exposed.recent_actions.append(f"[{timestamp}] {action}")

        # Compress if over limit
        if len(self.exposed.recent_actions) > self.exposed.MAX_ACTIONS:
            # Keep first, summarize middle, keep last 5
            first = self.exposed.recent_actions[0]
            last_five = self.exposed.recent_actions[-5:]
            # 6 = the first entry + the five we keep at the tail
            middle_count = len(self.exposed.recent_actions) - 6

            self.exposed.recent_actions = (
                [first] +
                [f"... ({middle_count} earlier actions)"] +
                last_five
            )

        self.last_updated = datetime.now().isoformat()

    def add_error(self, error: str, error_type: str = "") -> None:
        """
        Record an error for LLM attention.

        Errors are preserved more aggressively than actions
        because they need to be addressed.

        Args:
            error: Error message
            error_type: Optional error classification
        """
        prefix = f"[{error_type}] " if error_type else ""
        self.exposed.recent_errors.append(f"{prefix}{error}")

        # Keep most recent errors
        self.exposed.recent_errors = self.exposed.recent_errors[-self.exposed.MAX_ERRORS:]
        self.last_updated = datetime.now().isoformat()

    def clear_errors(self) -> None:
        """Clear all recorded errors (after they're addressed)."""
        self.exposed.recent_errors = []
        self.last_updated = datetime.now().isoformat()

    def update_study_status(
        self,
        name: str,
        status: str,
        trials_completed: int,
        trials_total: int,
        best_value: Optional[float] = None,
        best_trial: Optional[int] = None
    ) -> None:
        """
        Update the study status in exposed state.

        Args:
            name: Study name
            status: Current status (running, completed, failed, etc.)
            trials_completed: Number of completed trials
            trials_total: Total planned trials
            best_value: Best objective value found
            best_trial: Trial number with best value
        """
        self.exposed.study_name = name
        self.exposed.study_status = status
        self.exposed.trials_completed = trials_completed
        self.exposed.trials_total = trials_total
        self.exposed.best_value = best_value
        self.exposed.best_trial = best_trial
        self.last_updated = datetime.now().isoformat()

    def set_playbook_items(self, items: List[str]) -> None:
        """
        Set the active playbook items for context.

        Args:
            items: List of playbook item context strings
                (truncated to MAX_PLAYBOOK_ITEMS)
        """
        self.exposed.active_playbook_items = items[:self.exposed.MAX_PLAYBOOK_ITEMS]
        self.last_updated = datetime.now().isoformat()

    def load_isolated_data(self, key: str) -> Any:
        """
        Explicitly load isolated data when needed.

        Use this when you need access to heavy data that
        shouldn't be in every context window.

        Args:
            key: Attribute name in IsolatedState

        Returns:
            The isolated data value, or None if not found
        """
        return getattr(self.isolated, key, None)

    def set_isolated_data(self, key: str, value: Any) -> None:
        """
        Set isolated data.

        Silently ignores keys that are not attributes of IsolatedState.

        Args:
            key: Attribute name in IsolatedState
            value: Value to set
        """
        if hasattr(self.isolated, key):
            setattr(self.isolated, key, value)
            self.last_updated = datetime.now().isoformat()

    def add_trial_to_history(self, trial_data: Dict[str, Any]) -> None:
        """
        Add a trial to the full history (isolated state).

        Args:
            trial_data: Dictionary with trial information
                (mutated in place: a "recorded_at" timestamp is added)
        """
        trial_data["recorded_at"] = datetime.now().isoformat()
        self.isolated.full_trial_history.append(trial_data)
        self.last_updated = datetime.now().isoformat()

    def get_trial_history_summary(self, last_n: int = 10) -> List[Dict[str, Any]]:
        """
        Get summary of recent trials from isolated history.

        Args:
            last_n: Number of recent trials to return

        Returns:
            List of trial summary dictionaries
        """
        return self.isolated.full_trial_history[-last_n:]

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization.

        Note: only lightweight metadata of the isolated state is included;
        the full trial history is handled separately by save()/load().
        """
        return {
            "session_id": self.session_id,
            "created_at": self.created_at,
            "last_updated": self.last_updated,
            "exposed": {
                "task_type": self.exposed.task_type.value if self.exposed.task_type else None,
                "current_objective": self.exposed.current_objective,
                "recent_actions": self.exposed.recent_actions,
                "recent_errors": self.exposed.recent_errors,
                "study_name": self.exposed.study_name,
                "study_status": self.exposed.study_status,
                "trials_completed": self.exposed.trials_completed,
                "trials_total": self.exposed.trials_total,
                "best_value": self.exposed.best_value,
                "best_trial": self.exposed.best_trial,
                "active_playbook_items": self.exposed.active_playbook_items
            },
            "isolated": {
                "nx_model_path": self.isolated.nx_model_path,
                "nx_sim_path": self.isolated.nx_sim_path,
                "surrogate_model_path": self.isolated.surrogate_model_path,
                "full_playbook_path": self.isolated.full_playbook_path,
                "trial_history_count": len(self.isolated.full_trial_history)
            }
        }

    def save(self, path: Path) -> None:
        """
        Save session state to JSON.

        Note: Full trial history is saved to a separate file
        to keep the main state file manageable.

        Args:
            path: Path to save state file
        """
        path.parent.mkdir(parents=True, exist_ok=True)

        # Save main state
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(self.to_dict(), f, indent=2)

        # Save trial history separately if large
        if len(self.isolated.full_trial_history) > 0:
            history_path = path.with_suffix('.history.json')
            with open(history_path, 'w', encoding='utf-8') as f:
                json.dump(self.isolated.full_trial_history, f, indent=2)

    @classmethod
    def load(cls, path: Path) -> "AtomizerSessionState":
        """
        Load session state from JSON.

        Args:
            path: Path to state file

        Returns:
            Loaded session state (or new state if file doesn't exist)
        """
        if not path.exists():
            # Fresh session keyed by the current timestamp
            return cls(session_id=f"session_{datetime.now().strftime('%Y%m%d_%H%M%S')}")

        with open(path, encoding='utf-8') as f:
            data = json.load(f)

        state = cls(
            session_id=data.get("session_id", "unknown"),
            created_at=data.get("created_at", datetime.now().isoformat()),
            last_updated=data.get("last_updated", datetime.now().isoformat())
        )

        # Load exposed state
        exposed = data.get("exposed", {})
        if exposed.get("task_type"):
            state.exposed.task_type = TaskType(exposed["task_type"])
        state.exposed.current_objective = exposed.get("current_objective", "")
        state.exposed.recent_actions = exposed.get("recent_actions", [])
        state.exposed.recent_errors = exposed.get("recent_errors", [])
        state.exposed.study_name = exposed.get("study_name")
        state.exposed.study_status = exposed.get("study_status", "unknown")
        state.exposed.trials_completed = exposed.get("trials_completed", 0)
        state.exposed.trials_total = exposed.get("trials_total", 0)
        state.exposed.best_value = exposed.get("best_value")
        state.exposed.best_trial = exposed.get("best_trial")
        state.exposed.active_playbook_items = exposed.get("active_playbook_items", [])

        # Load isolated state metadata (paths only; heavy fields stay empty)
        isolated = data.get("isolated", {})
        state.isolated.nx_model_path = isolated.get("nx_model_path")
        state.isolated.nx_sim_path = isolated.get("nx_sim_path")
        state.isolated.surrogate_model_path = isolated.get("surrogate_model_path")
        state.isolated.full_playbook_path = isolated.get("full_playbook_path")

        # Load trial history from separate file if exists
        history_path = path.with_suffix('.history.json')
        if history_path.exists():
            with open(history_path, encoding='utf-8') as f:
                state.isolated.full_trial_history = json.load(f)

        return state
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience functions for session management.
# Module-level singleton holding the currently active session;
# None until the first get_session() call.
_active_session: Optional[AtomizerSessionState] = None
|
||||||
|
|
||||||
|
|
||||||
|
def get_session() -> AtomizerSessionState:
    """
    Get the active session state.

    Creates a new session (keyed by the current timestamp) if none exists.

    Returns:
        The active AtomizerSessionState
    """
    global _active_session
    if _active_session is None:
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        _active_session = AtomizerSessionState(session_id=f"session_{stamp}")
    return _active_session
|
||||||
|
|
||||||
|
|
||||||
|
def set_session(session: AtomizerSessionState) -> None:
    """
    Set the active session (replacing any existing one).

    Args:
        session: Session state to make active
    """
    global _active_session
    _active_session = session
|
||||||
|
|
||||||
|
|
||||||
|
def clear_session() -> None:
    """Clear the active session; the next get_session() creates a fresh one."""
    global _active_session
    _active_session = None
|
||||||
64
optimization_engine/core/__init__.py
Normal file
64
optimization_engine/core/__init__.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
"""
|
||||||
|
Optimization Engine Core
|
||||||
|
========================
|
||||||
|
|
||||||
|
Main optimization runners and algorithm selection.
|
||||||
|
|
||||||
|
Modules:
|
||||||
|
- runner: Main OptimizationRunner class
|
||||||
|
- base_runner: BaseRunner abstract class
|
||||||
|
- intelligent_optimizer: IMSO adaptive optimizer
|
||||||
|
- method_selector: Algorithm selection logic
|
||||||
|
- strategy_selector: Strategy portfolio management
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Lazy imports to avoid circular dependencies
|
||||||
|
def __getattr__(name):
    """Lazily resolve public names on first access (PEP 562).

    Deferring the submodule imports avoids circular dependencies at
    package import time. A dispatch table replaces the original long
    if/elif chain; behavior (including the AttributeError message for
    unknown names) is unchanged.

    Args:
        name: Attribute requested on the package module.

    Returns:
        The lazily imported class or function.

    Raises:
        AttributeError: if `name` is not one of the lazily exported names.
    """
    # name -> (relative module, attribute in that module)
    lazy_exports = {
        'OptimizationRunner': ('.runner', 'OptimizationRunner'),
        'BaseRunner': ('.base_runner', 'BaseRunner'),
        'NeuralOptimizationRunner': ('.runner_with_neural', 'NeuralOptimizationRunner'),
        'IntelligentOptimizer': ('.intelligent_optimizer', 'IntelligentOptimizer'),
        'IMSO': ('.intelligent_optimizer', 'IMSO'),
        'MethodSelector': ('.method_selector', 'MethodSelector'),
        'select_method': ('.method_selector', 'select_method'),
        'StrategySelector': ('.strategy_selector', 'StrategySelector'),
        'StrategyPortfolio': ('.strategy_portfolio', 'StrategyPortfolio'),
        'GradientOptimizer': ('.gradient_optimizer', 'GradientOptimizer'),
        'LBFGSPolisher': ('.gradient_optimizer', 'LBFGSPolisher'),
    }
    try:
        module_name, attr = lazy_exports[name]
    except KeyError:
        raise AttributeError(
            f"module 'optimization_engine.core' has no attribute '{name}'"
        ) from None

    from importlib import import_module
    module = import_module(module_name, __package__)
    return getattr(module, attr)
|
||||||
|
|
||||||
|
# Public API; each name is resolved lazily by the module-level __getattr__.
__all__ = [
    'OptimizationRunner',
    'BaseRunner',
    'NeuralOptimizationRunner',
    'IntelligentOptimizer',
    'IMSO',
    'MethodSelector',
    'select_method',
    'StrategySelector',
    'StrategyPortfolio',
    'GradientOptimizer',
    'LBFGSPolisher',
]
|
||||||
@@ -6,13 +6,13 @@ by providing a config-driven optimization runner.
|
|||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
# In study's run_optimization.py (now ~50 lines instead of ~300):
|
# In study's run_optimization.py (now ~50 lines instead of ~300):
|
||||||
from optimization_engine.base_runner import ConfigDrivenRunner
|
from optimization_engine.core.base_runner import ConfigDrivenRunner
|
||||||
|
|
||||||
runner = ConfigDrivenRunner(__file__)
|
runner = ConfigDrivenRunner(__file__)
|
||||||
runner.run()
|
runner.run()
|
||||||
|
|
||||||
Or for custom extraction logic:
|
Or for custom extraction logic:
|
||||||
from optimization_engine.base_runner import BaseOptimizationRunner
|
from optimization_engine.core.base_runner import BaseOptimizationRunner
|
||||||
|
|
||||||
class MyStudyRunner(BaseOptimizationRunner):
|
class MyStudyRunner(BaseOptimizationRunner):
|
||||||
def extract_objectives(self, op2_file, dat_file, design_vars):
|
def extract_objectives(self, op2_file, dat_file, design_vars):
|
||||||
@@ -164,8 +164,8 @@ class BaseOptimizationRunner(ABC):
|
|||||||
if str(project_root) not in sys.path:
|
if str(project_root) not in sys.path:
|
||||||
sys.path.insert(0, str(project_root))
|
sys.path.insert(0, str(project_root))
|
||||||
|
|
||||||
from optimization_engine.nx_solver import NXSolver
|
from optimization_engine.nx.solver import NXSolver
|
||||||
from optimization_engine.logger import get_logger
|
from optimization_engine.utils.logger import get_logger
|
||||||
|
|
||||||
self.results_dir.mkdir(exist_ok=True)
|
self.results_dir.mkdir(exist_ok=True)
|
||||||
self.logger = get_logger(self.study_name, study_dir=self.results_dir)
|
self.logger = get_logger(self.study_name, study_dir=self.results_dir)
|
||||||
@@ -10,8 +10,8 @@ Key Advantages over Derivative-Free Methods:
|
|||||||
- Can find precise local optima that sampling-based methods miss
|
- Can find precise local optima that sampling-based methods miss
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
from optimization_engine.gradient_optimizer import GradientOptimizer
|
from optimization_engine.core.gradient_optimizer import GradientOptimizer
|
||||||
from optimization_engine.generic_surrogate import GenericSurrogate
|
from optimization_engine.processors.surrogates.generic_surrogate import GenericSurrogate
|
||||||
|
|
||||||
# Load trained surrogate
|
# Load trained surrogate
|
||||||
surrogate = GenericSurrogate(config)
|
surrogate = GenericSurrogate(config)
|
||||||
@@ -577,7 +577,7 @@ class MultiStartLBFGS:
|
|||||||
surrogate_path: Path to surrogate_best.pt
|
surrogate_path: Path to surrogate_best.pt
|
||||||
config: Optimization config dict
|
config: Optimization config dict
|
||||||
"""
|
"""
|
||||||
from optimization_engine.generic_surrogate import GenericSurrogate
|
from optimization_engine.processors.surrogates.generic_surrogate import GenericSurrogate
|
||||||
|
|
||||||
self.surrogate = GenericSurrogate(config)
|
self.surrogate = GenericSurrogate(config)
|
||||||
self.surrogate.load(surrogate_path)
|
self.surrogate.load(surrogate_path)
|
||||||
@@ -706,7 +706,7 @@ def run_lbfgs_polish(
|
|||||||
weights = [obj.get('weight', 1.0) for obj in config.get('objectives', [])]
|
weights = [obj.get('weight', 1.0) for obj in config.get('objectives', [])]
|
||||||
directions = [obj.get('direction', 'minimize') for obj in config.get('objectives', [])]
|
directions = [obj.get('direction', 'minimize') for obj in config.get('objectives', [])]
|
||||||
|
|
||||||
from optimization_engine.generic_surrogate import GenericSurrogate
|
from optimization_engine.processors.surrogates.generic_surrogate import GenericSurrogate
|
||||||
|
|
||||||
surrogate = GenericSurrogate(config)
|
surrogate = GenericSurrogate(config)
|
||||||
surrogate.load(surrogate_path)
|
surrogate.load(surrogate_path)
|
||||||
@@ -15,7 +15,7 @@ This module enables Atomizer to automatically adapt to different FEA problem
|
|||||||
types without requiring manual algorithm configuration.
|
types without requiring manual algorithm configuration.
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
from optimization_engine.intelligent_optimizer import IntelligentOptimizer
|
from optimization_engine.core.intelligent_optimizer import IntelligentOptimizer
|
||||||
|
|
||||||
optimizer = IntelligentOptimizer(
|
optimizer = IntelligentOptimizer(
|
||||||
study_name="my_study",
|
study_name="my_study",
|
||||||
@@ -35,18 +35,18 @@ from typing import Dict, Callable, Optional, Any
|
|||||||
import json
|
import json
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
from optimization_engine.landscape_analyzer import LandscapeAnalyzer, print_landscape_report
|
from optimization_engine.reporting.landscape_analyzer import LandscapeAnalyzer, print_landscape_report
|
||||||
from optimization_engine.strategy_selector import (
|
from optimization_engine.core.strategy_selector import (
|
||||||
IntelligentStrategySelector,
|
IntelligentStrategySelector,
|
||||||
create_sampler_from_config
|
create_sampler_from_config
|
||||||
)
|
)
|
||||||
from optimization_engine.strategy_portfolio import (
|
from optimization_engine.core.strategy_portfolio import (
|
||||||
StrategyTransitionManager,
|
StrategyTransitionManager,
|
||||||
AdaptiveStrategyCallback
|
AdaptiveStrategyCallback
|
||||||
)
|
)
|
||||||
from optimization_engine.adaptive_surrogate import AdaptiveExploitationCallback
|
from optimization_engine.processors.surrogates.adaptive_surrogate import AdaptiveExploitationCallback
|
||||||
from optimization_engine.adaptive_characterization import CharacterizationStoppingCriterion
|
from optimization_engine.processors.adaptive_characterization import CharacterizationStoppingCriterion
|
||||||
from optimization_engine.realtime_tracking import create_realtime_callback
|
from optimization_engine.utils.realtime_tracking import create_realtime_callback
|
||||||
|
|
||||||
|
|
||||||
class IntelligentOptimizer:
|
class IntelligentOptimizer:
|
||||||
@@ -13,7 +13,7 @@ Classes:
|
|||||||
- RuntimeAdvisor: Monitors optimization and suggests pivots
|
- RuntimeAdvisor: Monitors optimization and suggests pivots
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
from optimization_engine.method_selector import AdaptiveMethodSelector
|
from optimization_engine.core.method_selector import AdaptiveMethodSelector
|
||||||
|
|
||||||
selector = AdaptiveMethodSelector()
|
selector = AdaptiveMethodSelector()
|
||||||
recommendation = selector.recommend(config_path)
|
recommendation = selector.recommend(config_path)
|
||||||
@@ -24,7 +24,7 @@ from datetime import datetime
|
|||||||
import pickle
|
import pickle
|
||||||
|
|
||||||
from optimization_engine.plugins import HookManager
|
from optimization_engine.plugins import HookManager
|
||||||
from optimization_engine.training_data_exporter import create_exporter_from_config
|
from optimization_engine.processors.surrogates.training_data_exporter import create_exporter_from_config
|
||||||
|
|
||||||
|
|
||||||
class OptimizationRunner:
|
class OptimizationRunner:
|
||||||
@@ -733,7 +733,7 @@ class OptimizationRunner:
|
|||||||
if post_config.get('generate_plots', False):
|
if post_config.get('generate_plots', False):
|
||||||
print("\nGenerating visualization plots...")
|
print("\nGenerating visualization plots...")
|
||||||
try:
|
try:
|
||||||
from optimization_engine.visualizer import OptimizationVisualizer
|
from optimization_engine.reporting.visualizer import OptimizationVisualizer
|
||||||
|
|
||||||
formats = post_config.get('plot_formats', ['png', 'pdf'])
|
formats = post_config.get('plot_formats', ['png', 'pdf'])
|
||||||
visualizer = OptimizationVisualizer(self.output_dir)
|
visualizer = OptimizationVisualizer(self.output_dir)
|
||||||
@@ -752,7 +752,7 @@ class OptimizationRunner:
|
|||||||
if post_config.get('cleanup_models', False):
|
if post_config.get('cleanup_models', False):
|
||||||
print("\nCleaning up trial models...")
|
print("\nCleaning up trial models...")
|
||||||
try:
|
try:
|
||||||
from optimization_engine.model_cleanup import ModelCleanup
|
from optimization_engine.nx.model_cleanup import ModelCleanup
|
||||||
|
|
||||||
keep_n = post_config.get('keep_top_n_models', 10)
|
keep_n = post_config.get('keep_top_n_models', 10)
|
||||||
dry_run = post_config.get('cleanup_dry_run', False)
|
dry_run = post_config.get('cleanup_dry_run', False)
|
||||||
@@ -20,8 +20,8 @@ import numpy as np
|
|||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
import optuna
|
import optuna
|
||||||
|
|
||||||
from optimization_engine.runner import OptimizationRunner
|
from optimization_engine.core.runner import OptimizationRunner
|
||||||
from optimization_engine.neural_surrogate import (
|
from optimization_engine.processors.surrogates.neural_surrogate import (
|
||||||
create_surrogate_from_config,
|
create_surrogate_from_config,
|
||||||
create_hybrid_optimizer_from_config,
|
create_hybrid_optimizer_from_config,
|
||||||
NeuralSurrogate,
|
NeuralSurrogate,
|
||||||
@@ -1,242 +1,278 @@
|
|||||||
"""
|
"""
|
||||||
Generic OP2 Extractor
|
Robust OP2 Extraction - Handles pyNastran FATAL flag issues gracefully.
|
||||||
====================
|
|
||||||
|
|
||||||
Reusable extractor for NX Nastran OP2 files using pyNastran.
|
This module provides a more robust OP2 extraction that:
|
||||||
Extracts mass properties, forces, displacements, stresses, etc.
|
1. Catches pyNastran FATAL flag exceptions
|
||||||
|
2. Checks if eigenvalues were actually extracted despite the flag
|
||||||
|
3. Falls back to F06 extraction if OP2 fails
|
||||||
|
4. Logs detailed failure information
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
extractor = OP2Extractor(op2_file="model.op2")
|
from optimization_engine.extractors.op2_extractor import robust_extract_first_frequency
|
||||||
mass = extractor.extract_mass()
|
|
||||||
forces = extractor.extract_grid_point_forces()
|
frequency = robust_extract_first_frequency(
|
||||||
|
op2_file=Path("results.op2"),
|
||||||
|
mode_number=1,
|
||||||
|
f06_file=Path("results.f06"), # Optional fallback
|
||||||
|
verbose=True
|
||||||
|
)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Dict, Any, Optional, List
|
from typing import Optional, Tuple
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
try:
|
|
||||||
from pyNastran.op2.op2 import read_op2
|
|
||||||
except ImportError:
|
|
||||||
raise ImportError("pyNastran is required. Install with: pip install pyNastran")
|
|
||||||
|
|
||||||
|
def robust_extract_first_frequency(
|
||||||
class OP2Extractor:
|
op2_file: Path,
|
||||||
"""
|
mode_number: int = 1,
|
||||||
Generic extractor for Nastran OP2 files.
|
f06_file: Optional[Path] = None,
|
||||||
|
verbose: bool = False
|
||||||
Supports:
|
|
||||||
- Mass properties
|
|
||||||
- Grid point forces
|
|
||||||
- Displacements
|
|
||||||
- Stresses
|
|
||||||
- Strains
|
|
||||||
- Element forces
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, op2_file: str):
|
|
||||||
"""
|
|
||||||
Args:
|
|
||||||
op2_file: Path to .op2 file
|
|
||||||
"""
|
|
||||||
self.op2_file = Path(op2_file)
|
|
||||||
self._op2_model = None
|
|
||||||
|
|
||||||
def _load_op2(self):
|
|
||||||
"""Lazy load OP2 file"""
|
|
||||||
if self._op2_model is None:
|
|
||||||
if not self.op2_file.exists():
|
|
||||||
raise FileNotFoundError(f"OP2 file not found: {self.op2_file}")
|
|
||||||
self._op2_model = read_op2(str(self.op2_file), debug=False)
|
|
||||||
return self._op2_model
|
|
||||||
|
|
||||||
def extract_mass(self, subcase_id: Optional[int] = None) -> Dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Extract mass properties from OP2.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
dict: {
|
|
||||||
'mass_kg': total mass in kg,
|
|
||||||
'mass_g': total mass in grams,
|
|
||||||
'cg': [x, y, z] center of gravity,
|
|
||||||
'inertia': 3x3 inertia matrix
|
|
||||||
}
|
|
||||||
"""
|
|
||||||
op2 = self._load_op2()
|
|
||||||
|
|
||||||
# Get grid point weight (mass properties)
|
|
||||||
if not hasattr(op2, 'grid_point_weight') or not op2.grid_point_weight:
|
|
||||||
raise ValueError("No mass properties found in OP2 file")
|
|
||||||
|
|
||||||
gpw = op2.grid_point_weight
|
|
||||||
|
|
||||||
# Mass is typically in the first element of MO matrix (reference point mass)
|
|
||||||
# OP2 stores mass in ton, mm, sec units typically
|
|
||||||
mass_matrix = gpw.MO[0, 0] if hasattr(gpw, 'MO') else None
|
|
||||||
|
|
||||||
# Get reference point
|
|
||||||
if hasattr(gpw, 'reference_point') and gpw.reference_point:
|
|
||||||
ref_point = gpw.reference_point
|
|
||||||
else:
|
|
||||||
ref_point = 0
|
|
||||||
|
|
||||||
# Extract mass (convert based on units)
|
|
||||||
# Nastran default: ton-mm-sec → need to convert to kg
|
|
||||||
if mass_matrix is not None:
|
|
||||||
mass_ton = mass_matrix
|
|
||||||
mass_kg = mass_ton * 1000.0 # 1 ton = 1000 kg
|
|
||||||
else:
|
|
||||||
raise ValueError("Could not extract mass from OP2")
|
|
||||||
|
|
||||||
# Extract CG if available
|
|
||||||
cg = [0.0, 0.0, 0.0]
|
|
||||||
if hasattr(gpw, 'cg'):
|
|
||||||
cg = gpw.cg.tolist() if hasattr(gpw.cg, 'tolist') else list(gpw.cg)
|
|
||||||
|
|
||||||
return {
|
|
||||||
'mass_kg': mass_kg,
|
|
||||||
'mass_g': mass_kg * 1000.0,
|
|
||||||
'mass_ton': mass_ton,
|
|
||||||
'cg': cg,
|
|
||||||
'reference_point': ref_point,
|
|
||||||
'units': 'ton-mm-sec (converted to kg)',
|
|
||||||
}
|
|
||||||
|
|
||||||
def extract_grid_point_forces(
|
|
||||||
self,
|
|
||||||
subcase_id: Optional[int] = None,
|
|
||||||
component: str = "total" # total, fx, fy, fz, mx, my, mz
|
|
||||||
) -> Dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Extract grid point forces from OP2.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
subcase_id: Subcase ID (if None, uses first available)
|
|
||||||
component: Force component to extract
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
dict: {
|
|
||||||
'force': resultant force value,
|
|
||||||
'all_forces': list of forces at each grid point,
|
|
||||||
'max_force': maximum force,
|
|
||||||
'total_force': sum of all forces
|
|
||||||
}
|
|
||||||
"""
|
|
||||||
op2 = self._load_op2()
|
|
||||||
|
|
||||||
if not hasattr(op2, 'grid_point_forces') or not op2.grid_point_forces:
|
|
||||||
raise ValueError("No grid point forces found in OP2 file")
|
|
||||||
|
|
||||||
# Get first subcase if not specified
|
|
||||||
if subcase_id is None:
|
|
||||||
subcase_id = list(op2.grid_point_forces.keys())[0]
|
|
||||||
|
|
||||||
gpf = op2.grid_point_forces[subcase_id]
|
|
||||||
|
|
||||||
# Extract forces based on component
|
|
||||||
# Grid point forces table typically has columns: fx, fy, fz, mx, my, mz
|
|
||||||
if component == "total":
|
|
||||||
# Calculate resultant force: sqrt(fx^2 + fy^2 + fz^2)
|
|
||||||
forces = np.sqrt(gpf.data[:, 0]**2 + gpf.data[:, 1]**2 + gpf.data[:, 2]**2)
|
|
||||||
elif component == "fx":
|
|
||||||
forces = gpf.data[:, 0]
|
|
||||||
elif component == "fy":
|
|
||||||
forces = gpf.data[:, 1]
|
|
||||||
elif component == "fz":
|
|
||||||
forces = gpf.data[:, 2]
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Unknown component: {component}")
|
|
||||||
|
|
||||||
return {
|
|
||||||
'force': float(np.max(np.abs(forces))),
|
|
||||||
'all_forces': forces.tolist(),
|
|
||||||
'max_force': float(np.max(forces)),
|
|
||||||
'min_force': float(np.min(forces)),
|
|
||||||
'total_force': float(np.sum(forces)),
|
|
||||||
'component': component,
|
|
||||||
'subcase_id': subcase_id,
|
|
||||||
}
|
|
||||||
|
|
||||||
def extract_applied_loads(self, subcase_id: Optional[int] = None) -> Dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Extract applied loads from OP2 file.
|
|
||||||
|
|
||||||
This attempts to get load vector information if available.
|
|
||||||
Note: Not all OP2 files contain this data.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
dict: Load information
|
|
||||||
"""
|
|
||||||
op2 = self._load_op2()
|
|
||||||
|
|
||||||
# Try to get load vectors
|
|
||||||
if hasattr(op2, 'load_vectors') and op2.load_vectors:
|
|
||||||
if subcase_id is None:
|
|
||||||
subcase_id = list(op2.load_vectors.keys())[0]
|
|
||||||
|
|
||||||
lv = op2.load_vectors[subcase_id]
|
|
||||||
loads = lv.data
|
|
||||||
|
|
||||||
return {
|
|
||||||
'total_load': float(np.sum(np.abs(loads))),
|
|
||||||
'max_load': float(np.max(np.abs(loads))),
|
|
||||||
'load_resultant': float(np.linalg.norm(loads)),
|
|
||||||
'subcase_id': subcase_id,
|
|
||||||
}
|
|
||||||
else:
|
|
||||||
# Fallback: use grid point forces as approximation
|
|
||||||
return self.extract_grid_point_forces(subcase_id)
|
|
||||||
|
|
||||||
|
|
||||||
def extract_mass_from_op2(op2_file: str) -> float:
|
|
||||||
"""
|
|
||||||
Convenience function to extract mass in kg.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
op2_file: Path to .op2 file
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Mass in kilograms
|
|
||||||
"""
|
|
||||||
extractor = OP2Extractor(op2_file)
|
|
||||||
result = extractor.extract_mass()
|
|
||||||
return result['mass_kg']
|
|
||||||
|
|
||||||
|
|
||||||
def extract_force_from_op2(
|
|
||||||
op2_file: str,
|
|
||||||
component: str = "fz"
|
|
||||||
) -> float:
|
) -> float:
|
||||||
"""
|
"""
|
||||||
Convenience function to extract force component.
|
Robustly extract natural frequency from OP2 file, handling pyNastran issues.
|
||||||
|
|
||||||
|
This function attempts multiple strategies:
|
||||||
|
1. Standard pyNastran OP2 reading
|
||||||
|
2. Force reading with debug=False to ignore FATAL flags
|
||||||
|
3. Partial OP2 reading (extract eigenvalues even if FATAL flag exists)
|
||||||
|
4. Fallback to F06 file parsing (if provided)
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
op2_file: Path to .op2 file
|
op2_file: Path to OP2 output file
|
||||||
component: Force component (fx, fy, fz, or total)
|
mode_number: Mode number to extract (1-based index)
|
||||||
|
f06_file: Optional F06 file for fallback extraction
|
||||||
|
verbose: Print detailed extraction information
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Force value
|
Natural frequency in Hz
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If frequency cannot be extracted by any method
|
||||||
"""
|
"""
|
||||||
extractor = OP2Extractor(op2_file)
|
from pyNastran.op2.op2 import OP2
|
||||||
result = extractor.extract_grid_point_forces(component=component)
|
|
||||||
return result['force']
|
|
||||||
|
|
||||||
|
if not op2_file.exists():
|
||||||
|
raise FileNotFoundError(f"OP2 file not found: {op2_file}")
|
||||||
|
|
||||||
if __name__ == "__main__":
|
# Strategy 1: Try standard OP2 reading
|
||||||
# Example usage
|
|
||||||
import sys
|
|
||||||
if len(sys.argv) > 1:
|
|
||||||
op2_file = sys.argv[1]
|
|
||||||
extractor = OP2Extractor(op2_file)
|
|
||||||
|
|
||||||
# Extract mass
|
|
||||||
mass_result = extractor.extract_mass()
|
|
||||||
print(f"Mass: {mass_result['mass_kg']:.6f} kg")
|
|
||||||
print(f"CG: {mass_result['cg']}")
|
|
||||||
|
|
||||||
# Extract forces
|
|
||||||
try:
|
try:
|
||||||
force_result = extractor.extract_grid_point_forces(component="fz")
|
if verbose:
|
||||||
print(f"Max Fz: {force_result['force']:.2f} N")
|
print(f"[OP2 EXTRACT] Attempting standard read: {op2_file.name}")
|
||||||
except ValueError as e:
|
|
||||||
print(f"Forces not available: {e}")
|
model = OP2()
|
||||||
|
model.read_op2(str(op2_file))
|
||||||
|
|
||||||
|
if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
|
||||||
|
frequency = _extract_frequency_from_model(model, mode_number)
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] ✓ Success (standard read): {frequency:.6f} Hz")
|
||||||
|
return frequency
|
||||||
|
else:
|
||||||
|
raise ValueError("No eigenvalues found in OP2 file")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] ✗ Standard read failed: {str(e)[:100]}")
|
||||||
|
|
||||||
|
# Check if this is a FATAL flag issue
|
||||||
|
is_fatal_flag = 'FATAL' in str(e) and 'op2_reader' in str(e.__class__.__module__)
|
||||||
|
|
||||||
|
if is_fatal_flag:
|
||||||
|
# Strategy 2: Try reading with more lenient settings
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] Detected pyNastran FATAL flag issue")
|
||||||
|
print(f"[OP2 EXTRACT] Attempting partial extraction...")
|
||||||
|
|
||||||
|
try:
|
||||||
|
model = OP2()
|
||||||
|
# Try to read with debug=False and skip_undefined_matrices=True
|
||||||
|
model.read_op2(
|
||||||
|
str(op2_file),
|
||||||
|
debug=False,
|
||||||
|
skip_undefined_matrices=True
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check if eigenvalues were extracted despite FATAL
|
||||||
|
if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
|
||||||
|
frequency = _extract_frequency_from_model(model, mode_number)
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] ✓ Success (lenient mode): {frequency:.6f} Hz")
|
||||||
|
print(f"[OP2 EXTRACT] Note: pyNastran reported FATAL but data is valid!")
|
||||||
|
return frequency
|
||||||
|
|
||||||
|
except Exception as e2:
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] ✗ Lenient read also failed: {str(e2)[:100]}")
|
||||||
|
|
||||||
|
# Strategy 3: Fallback to F06 parsing
|
||||||
|
if f06_file and f06_file.exists():
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] Falling back to F06 extraction: {f06_file.name}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
frequency = extract_frequency_from_f06(f06_file, mode_number, verbose=verbose)
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] ✓ Success (F06 fallback): {frequency:.6f} Hz")
|
||||||
|
return frequency
|
||||||
|
|
||||||
|
except Exception as e3:
|
||||||
|
if verbose:
|
||||||
|
print(f"[OP2 EXTRACT] ✗ F06 extraction failed: {str(e3)}")
|
||||||
|
|
||||||
|
# All strategies failed
|
||||||
|
raise ValueError(
|
||||||
|
f"Could not extract frequency from OP2 file: {op2_file.name}. "
|
||||||
|
f"Original error: {str(e)}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_frequency_from_model(model, mode_number: int) -> float:
|
||||||
|
"""Extract frequency from loaded OP2 model."""
|
||||||
|
if not hasattr(model, 'eigenvalues') or len(model.eigenvalues) == 0:
|
||||||
|
raise ValueError("No eigenvalues found in model")
|
||||||
|
|
||||||
|
# Get first subcase
|
||||||
|
subcase = list(model.eigenvalues.keys())[0]
|
||||||
|
eig_obj = model.eigenvalues[subcase]
|
||||||
|
|
||||||
|
# Check if mode exists
|
||||||
|
if mode_number > len(eig_obj.eigenvalues):
|
||||||
|
raise ValueError(
|
||||||
|
f"Mode {mode_number} not found. "
|
||||||
|
f"Only {len(eig_obj.eigenvalues)} modes available"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extract eigenvalue and convert to frequency
|
||||||
|
eigenvalue = eig_obj.eigenvalues[mode_number - 1]
|
||||||
|
angular_freq = np.sqrt(abs(eigenvalue)) # Use abs to handle numerical precision issues
|
||||||
|
frequency_hz = angular_freq / (2 * np.pi)
|
||||||
|
|
||||||
|
return float(frequency_hz)
|
||||||
|
|
||||||
|
|
||||||
|
def extract_frequency_from_f06(
|
||||||
|
f06_file: Path,
|
||||||
|
mode_number: int = 1,
|
||||||
|
verbose: bool = False
|
||||||
|
) -> float:
|
||||||
|
"""
|
||||||
|
Extract natural frequency from F06 text file (fallback method).
|
||||||
|
|
||||||
|
Parses the F06 file to find eigenvalue results table and extracts frequency.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
f06_file: Path to F06 output file
|
||||||
|
mode_number: Mode number to extract (1-based index)
|
||||||
|
verbose: Print extraction details
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Natural frequency in Hz
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If frequency cannot be found in F06
|
||||||
|
"""
|
||||||
|
if not f06_file.exists():
|
||||||
|
raise FileNotFoundError(f"F06 file not found: {f06_file}")
|
||||||
|
|
||||||
|
with open(f06_file, 'r', encoding='latin-1', errors='ignore') as f:
|
||||||
|
content = f.read()
|
||||||
|
|
||||||
|
# Look for eigenvalue table
|
||||||
|
# Nastran F06 format has eigenvalue results like:
|
||||||
|
# R E A L E I G E N V A L U E S
|
||||||
|
# MODE EXTRACTION EIGENVALUE RADIANS CYCLES GENERALIZED GENERALIZED
|
||||||
|
# NO. ORDER MASS STIFFNESS
|
||||||
|
# 1 1 -6.602743E+04 2.569656E+02 4.089338E+01 1.000000E+00 6.602743E+04
|
||||||
|
|
||||||
|
lines = content.split('\n')
|
||||||
|
|
||||||
|
# Find eigenvalue table
|
||||||
|
eigenvalue_section_start = None
|
||||||
|
for i, line in enumerate(lines):
|
||||||
|
if 'R E A L E I G E N V A L U E S' in line:
|
||||||
|
eigenvalue_section_start = i
|
||||||
|
break
|
||||||
|
|
||||||
|
if eigenvalue_section_start is None:
|
||||||
|
raise ValueError("Eigenvalue table not found in F06 file")
|
||||||
|
|
||||||
|
# Parse eigenvalue table (starts a few lines after header)
|
||||||
|
for i in range(eigenvalue_section_start + 3, min(eigenvalue_section_start + 100, len(lines))):
|
||||||
|
line = lines[i].strip()
|
||||||
|
|
||||||
|
if not line or line.startswith('1'): # Page break
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Parse line with mode data
|
||||||
|
parts = line.split()
|
||||||
|
if len(parts) >= 5:
|
||||||
|
try:
|
||||||
|
mode_num = int(parts[0])
|
||||||
|
if mode_num == mode_number:
|
||||||
|
# Frequency is in column 5 (CYCLES)
|
||||||
|
frequency = float(parts[4])
|
||||||
|
if verbose:
|
||||||
|
print(f"[F06 EXTRACT] Found mode {mode_num}: {frequency:.6f} Hz")
|
||||||
|
return frequency
|
||||||
|
except (ValueError, IndexError):
|
||||||
|
continue
|
||||||
|
|
||||||
|
raise ValueError(f"Mode {mode_number} not found in F06 eigenvalue table")
|
||||||
|
|
||||||
|
|
||||||
|
def validate_op2_file(op2_file: Path, f06_file: Optional[Path] = None) -> Tuple[bool, str]:
|
||||||
|
"""
|
||||||
|
Validate if an OP2 file contains usable eigenvalue data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
op2_file: Path to OP2 file
|
||||||
|
f06_file: Optional F06 file for cross-reference
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
(is_valid, message): Tuple of validation status and explanation
|
||||||
|
"""
|
||||||
|
if not op2_file.exists():
|
||||||
|
return False, f"OP2 file does not exist: {op2_file}"
|
||||||
|
|
||||||
|
if op2_file.stat().st_size == 0:
|
||||||
|
return False, "OP2 file is empty"
|
||||||
|
|
||||||
|
# Try to extract first frequency
|
||||||
|
try:
|
||||||
|
frequency = robust_extract_first_frequency(
|
||||||
|
op2_file,
|
||||||
|
mode_number=1,
|
||||||
|
f06_file=f06_file,
|
||||||
|
verbose=False
|
||||||
|
)
|
||||||
|
return True, f"Valid OP2 file (first frequency: {frequency:.6f} Hz)"
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
return False, f"Cannot extract data from OP2: {str(e)}"
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience function (same signature as old function for backward compatibility)
|
||||||
|
def extract_first_frequency(op2_file: Path, mode_number: int = 1) -> float:
|
||||||
|
"""
|
||||||
|
Extract first natural frequency (backward compatible with old function).
|
||||||
|
|
||||||
|
This is the simple version - just use robust_extract_first_frequency directly
|
||||||
|
for more control.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
op2_file: Path to OP2 file
|
||||||
|
mode_number: Mode number (1-based)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Frequency in Hz
|
||||||
|
"""
|
||||||
|
# Try to find F06 file in same directory
|
||||||
|
f06_file = op2_file.with_suffix('.f06')
|
||||||
|
|
||||||
|
return robust_extract_first_frequency(
|
||||||
|
op2_file,
|
||||||
|
mode_number=mode_number,
|
||||||
|
f06_file=f06_file if f06_file.exists() else None,
|
||||||
|
verbose=False
|
||||||
|
)
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
{
|
{
|
||||||
"feature_registry": {
|
"feature_registry": {
|
||||||
"version": "0.2.0",
|
"version": "0.3.0",
|
||||||
"last_updated": "2025-01-16",
|
"last_updated": "2025-12-29",
|
||||||
"description": "Comprehensive catalog of Atomizer capabilities for LLM-driven optimization",
|
"description": "Comprehensive catalog of Atomizer capabilities for LLM-driven optimization",
|
||||||
"architecture_doc": "docs/FEATURE_REGISTRY_ARCHITECTURE.md",
|
"architecture_doc": "docs/FEATURE_REGISTRY_ARCHITECTURE.md",
|
||||||
"categories": {
|
"categories": {
|
||||||
@@ -162,9 +162,9 @@
|
|||||||
"lifecycle_stage": "all",
|
"lifecycle_stage": "all",
|
||||||
"abstraction_level": "workflow",
|
"abstraction_level": "workflow",
|
||||||
"implementation": {
|
"implementation": {
|
||||||
"file_path": "optimization_engine/runner.py",
|
"file_path": "optimization_engine/core/runner.py",
|
||||||
"function_name": "run_optimization",
|
"function_name": "run_optimization",
|
||||||
"entry_point": "from optimization_engine.runner import run_optimization"
|
"entry_point": "from optimization_engine.core.runner import run_optimization"
|
||||||
},
|
},
|
||||||
"interface": {
|
"interface": {
|
||||||
"inputs": [
|
"inputs": [
|
||||||
@@ -240,7 +240,7 @@
|
|||||||
"lifecycle_stage": "optimization",
|
"lifecycle_stage": "optimization",
|
||||||
"abstraction_level": "primitive",
|
"abstraction_level": "primitive",
|
||||||
"implementation": {
|
"implementation": {
|
||||||
"file_path": "optimization_engine/runner.py",
|
"file_path": "optimization_engine/core/runner.py",
|
||||||
"function_name": "optuna.samplers.TPESampler",
|
"function_name": "optuna.samplers.TPESampler",
|
||||||
"entry_point": "import optuna.samplers.TPESampler"
|
"entry_point": "import optuna.samplers.TPESampler"
|
||||||
},
|
},
|
||||||
@@ -295,9 +295,9 @@
|
|||||||
"lifecycle_stage": "solve",
|
"lifecycle_stage": "solve",
|
||||||
"abstraction_level": "primitive",
|
"abstraction_level": "primitive",
|
||||||
"implementation": {
|
"implementation": {
|
||||||
"file_path": "optimization_engine/nx_solver.py",
|
"file_path": "optimization_engine/nx/solver.py",
|
||||||
"function_name": "run_nx_simulation",
|
"function_name": "run_nx_simulation",
|
||||||
"entry_point": "from optimization_engine.nx_solver import run_nx_simulation"
|
"entry_point": "from optimization_engine.nx.solver import run_nx_simulation"
|
||||||
},
|
},
|
||||||
"interface": {
|
"interface": {
|
||||||
"inputs": [
|
"inputs": [
|
||||||
@@ -370,9 +370,9 @@
|
|||||||
"lifecycle_stage": "pre_solve",
|
"lifecycle_stage": "pre_solve",
|
||||||
"abstraction_level": "primitive",
|
"abstraction_level": "primitive",
|
||||||
"implementation": {
|
"implementation": {
|
||||||
"file_path": "optimization_engine/nx_updater.py",
|
"file_path": "optimization_engine/nx/updater.py",
|
||||||
"function_name": "update_nx_expressions",
|
"function_name": "update_nx_expressions",
|
||||||
"entry_point": "from optimization_engine.nx_updater import update_nx_expressions"
|
"entry_point": "from optimization_engine.nx.updater import update_nx_expressions"
|
||||||
},
|
},
|
||||||
"interface": {
|
"interface": {
|
||||||
"inputs": [
|
"inputs": [
|
||||||
@@ -558,9 +558,9 @@
|
|||||||
"lifecycle_stage": "pre_optimization",
|
"lifecycle_stage": "pre_optimization",
|
||||||
"abstraction_level": "composite",
|
"abstraction_level": "composite",
|
||||||
"implementation": {
|
"implementation": {
|
||||||
"file_path": "optimization_engine/runner.py",
|
"file_path": "optimization_engine/study/creator.py",
|
||||||
"function_name": "setup_study",
|
"function_name": "setup_study",
|
||||||
"entry_point": "from optimization_engine.runner import setup_study"
|
"entry_point": "from optimization_engine.study.creator import setup_study"
|
||||||
},
|
},
|
||||||
"interface": {
|
"interface": {
|
||||||
"inputs": [
|
"inputs": [
|
||||||
|
|||||||
@@ -21,8 +21,8 @@ import importlib.util
|
|||||||
import logging
|
import logging
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
|
||||||
from optimization_engine.pynastran_research_agent import PyNastranResearchAgent, ExtractionPattern
|
from optimization_engine.future.pynastran_research_agent import PyNastranResearchAgent, ExtractionPattern
|
||||||
from optimization_engine.extractor_library import ExtractorLibrary, create_study_manifest
|
from optimization_engine.extractors.extractor_library import ExtractorLibrary, create_study_manifest
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -296,7 +296,7 @@ class StepClassifier:
|
|||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Test the step classifier."""
|
"""Test the step classifier."""
|
||||||
from optimization_engine.workflow_decomposer import WorkflowDecomposer
|
from optimization_engine.future.workflow_decomposer import WorkflowDecomposer
|
||||||
|
|
||||||
print("Step Classifier Test")
|
print("Step Classifier Test")
|
||||||
print("=" * 80)
|
print("=" * 80)
|
||||||
@@ -12,7 +12,7 @@ Last Updated: 2025-01-16
|
|||||||
from typing import List, Dict, Any
|
from typing import List, Dict, Any
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
from optimization_engine.capability_matcher import CapabilityMatch, StepMatch
|
from optimization_engine.config.capability_matcher import CapabilityMatch, StepMatch
|
||||||
|
|
||||||
|
|
||||||
class TargetedResearchPlanner:
|
class TargetedResearchPlanner:
|
||||||
@@ -188,9 +188,9 @@ class TargetedResearchPlanner:
|
|||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Test the targeted research planner."""
|
"""Test the targeted research planner."""
|
||||||
from optimization_engine.codebase_analyzer import CodebaseCapabilityAnalyzer
|
from optimization_engine.utils.codebase_analyzer import CodebaseCapabilityAnalyzer
|
||||||
from optimization_engine.workflow_decomposer import WorkflowDecomposer
|
from optimization_engine.future.workflow_decomposer import WorkflowDecomposer
|
||||||
from optimization_engine.capability_matcher import CapabilityMatcher
|
from optimization_engine.config.capability_matcher import CapabilityMatcher
|
||||||
|
|
||||||
print("Targeted Research Planner Test")
|
print("Targeted Research Planner Test")
|
||||||
print("=" * 80)
|
print("=" * 80)
|
||||||
@@ -415,7 +415,7 @@ class ZernikeGNNOptimizer:
|
|||||||
"""
|
"""
|
||||||
import time
|
import time
|
||||||
import re
|
import re
|
||||||
from optimization_engine.nx_solver import NXSolver
|
from optimization_engine.nx.solver import NXSolver
|
||||||
from optimization_engine.extractors import ZernikeExtractor
|
from optimization_engine.extractors import ZernikeExtractor
|
||||||
|
|
||||||
study_dir = Path(study_dir)
|
study_dir = Path(study_dir)
|
||||||
|
|||||||
102
optimization_engine/interview/__init__.py
Normal file
102
optimization_engine/interview/__init__.py
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
"""
|
||||||
|
Atomizer Study Interview Mode
|
||||||
|
|
||||||
|
This module provides an intelligent interview system for gathering engineering requirements
|
||||||
|
before study generation. It systematically questions users about objectives, constraints,
|
||||||
|
and design variables to create accurate optimization configurations.
|
||||||
|
|
||||||
|
Components:
|
||||||
|
- StudyInterviewEngine: Main orchestrator
|
||||||
|
- QuestionEngine: Question flow and conditional logic
|
||||||
|
- InterviewStateManager: State persistence
|
||||||
|
- InterviewPresenter: Presentation abstraction (ClaudePresenter)
|
||||||
|
- EngineeringValidator: Engineering validation and anti-pattern detection
|
||||||
|
- InterviewIntelligence: Smart features (extractor mapping, complexity)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .interview_state import (
|
||||||
|
InterviewState,
|
||||||
|
InterviewPhase,
|
||||||
|
AnsweredQuestion,
|
||||||
|
InterviewStateManager,
|
||||||
|
LogEntry,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .question_engine import (
|
||||||
|
QuestionEngine,
|
||||||
|
Question,
|
||||||
|
QuestionOption,
|
||||||
|
QuestionCondition,
|
||||||
|
ValidationRule,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .interview_presenter import (
|
||||||
|
InterviewPresenter,
|
||||||
|
ClaudePresenter,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .study_interview import (
|
||||||
|
StudyInterviewEngine,
|
||||||
|
InterviewSession,
|
||||||
|
NextAction,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .engineering_validator import (
|
||||||
|
EngineeringValidator,
|
||||||
|
MaterialsDatabase,
|
||||||
|
AntiPatternDetector,
|
||||||
|
ValidationResult,
|
||||||
|
AntiPattern,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .interview_intelligence import (
|
||||||
|
InterviewIntelligence,
|
||||||
|
ExtractorMapper,
|
||||||
|
ExtractorSelection,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .study_blueprint import (
|
||||||
|
StudyBlueprint,
|
||||||
|
DesignVariable,
|
||||||
|
Objective,
|
||||||
|
Constraint,
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
# State management
|
||||||
|
"InterviewState",
|
||||||
|
"InterviewPhase",
|
||||||
|
"AnsweredQuestion",
|
||||||
|
"InterviewStateManager",
|
||||||
|
"LogEntry",
|
||||||
|
# Question engine
|
||||||
|
"QuestionEngine",
|
||||||
|
"Question",
|
||||||
|
"QuestionOption",
|
||||||
|
"QuestionCondition",
|
||||||
|
"ValidationRule",
|
||||||
|
# Presentation
|
||||||
|
"InterviewPresenter",
|
||||||
|
"ClaudePresenter",
|
||||||
|
# Main engine
|
||||||
|
"StudyInterviewEngine",
|
||||||
|
"InterviewSession",
|
||||||
|
"NextAction",
|
||||||
|
# Validation
|
||||||
|
"EngineeringValidator",
|
||||||
|
"MaterialsDatabase",
|
||||||
|
"AntiPatternDetector",
|
||||||
|
"ValidationResult",
|
||||||
|
"AntiPattern",
|
||||||
|
# Intelligence
|
||||||
|
"InterviewIntelligence",
|
||||||
|
"ExtractorMapper",
|
||||||
|
"ExtractorSelection",
|
||||||
|
# Blueprint
|
||||||
|
"StudyBlueprint",
|
||||||
|
"DesignVariable",
|
||||||
|
"Objective",
|
||||||
|
"Constraint",
|
||||||
|
]
|
||||||
|
|
||||||
|
__version__ = "1.0.0"
|
||||||
781
optimization_engine/interview/engineering_validator.py
Normal file
781
optimization_engine/interview/engineering_validator.py
Normal file
@@ -0,0 +1,781 @@
|
|||||||
|
"""
|
||||||
|
Engineering Validator
|
||||||
|
|
||||||
|
Validates interview answers against engineering knowledge and detects anti-patterns.
|
||||||
|
Provides:
|
||||||
|
- MaterialsDatabase: Common materials with properties
|
||||||
|
- AntiPatternDetector: Detects optimization setup mistakes
|
||||||
|
- EngineeringValidator: Main validation logic
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Any, Optional, Tuple
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
from difflib import SequenceMatcher
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Material:
    """Engineering material with properties."""
    id: str
    names: List[str]
    category: str
    properties: Dict[str, Any]
    notes: Optional[str] = None
    recommended_safety_factors: Optional[Dict[str, float]] = None

    @property
    def density(self) -> Optional[float]:
        """Density in kg/m^3, or None if not defined."""
        return self.properties.get("density_kg_m3")

    @property
    def yield_stress(self) -> Optional[float]:
        """Yield stress in MPa, or None if not defined."""
        return self.properties.get("yield_stress_mpa")

    @property
    def ultimate_stress(self) -> Optional[float]:
        """Ultimate stress in MPa, or None if not defined."""
        return self.properties.get("ultimate_stress_mpa")

    @property
    def elastic_modulus(self) -> Optional[float]:
        """Elastic modulus in GPa, or None if not defined."""
        return self.properties.get("elastic_modulus_gpa")

    def get_safe_stress(self, application: str = "static") -> Optional[float]:
        """Get safe stress limit with recommended safety factor."""
        stress = self.yield_stress
        if stress is None:
            return None

        # Fall back to SF = 1.5 when no application-specific factor is given.
        factors = self.recommended_safety_factors or {}
        return stress / factors.get(application, 1.5)
|
||||||
|
|
||||||
|
|
||||||
|
class MaterialsDatabase:
    """
    Database of common engineering materials and properties.

    Materials are loaded from a JSON file and indexed by each of their
    known names (lowercased). Lookups support exact, id-based, and fuzzy
    matching for user convenience.
    """

    def __init__(self, db_path: Optional[Path] = None):
        """
        Initialize materials database.

        Args:
            db_path: Path to materials JSON. Uses default if None.
        """
        if db_path is None:
            db_path = Path(__file__).parent / "schemas" / "materials_database.json"

        self.db_path = db_path
        self.materials: Dict[str, Material] = {}
        self._name_index: Dict[str, str] = {}  # lowercase name -> material_id

        self._load_database()

    def _load_database(self) -> None:
        """Load materials from the JSON file. Silently no-ops if the file is missing."""
        if not self.db_path.exists():
            return

        with open(self.db_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        for mat_data in data.get("materials", []):
            material = Material(
                id=mat_data["id"],
                names=mat_data["names"],
                category=mat_data["category"],
                properties=mat_data["properties"],
                notes=mat_data.get("notes"),
                recommended_safety_factors=mat_data.get("recommended_safety_factors"),
            )
            self.materials[material.id] = material

            # Build name index for case-insensitive lookups
            for name in material.names:
                self._name_index[name.lower()] = material.id

    def get_material(self, name: str) -> Optional[Material]:
        """
        Look up material by name (supports fuzzy matching).

        Args:
            name: Material name (e.g., "Al 6061-T6", "aluminum", "steel 304")

        Returns:
            Material if found, None otherwise
        """
        name_lower = name.lower().strip()

        # Exact match on an indexed name
        if name_lower in self._name_index:
            return self.materials[self._name_index[name_lower]]

        # Try by ID
        if name_lower in self.materials:
            return self.materials[name_lower]

        # Fuzzy match: the highest similarity above the threshold wins
        best_match = None
        best_ratio = 0.6  # Minimum threshold

        for indexed_name, mat_id in self._name_index.items():
            ratio = SequenceMatcher(None, name_lower, indexed_name).ratio()
            if ratio > best_ratio:
                best_ratio = ratio
                best_match = mat_id

        if best_match:
            return self.materials[best_match]

        return None

    def get_yield_stress(self, material_name: str) -> Optional[float]:
        """Get yield stress for material in MPa."""
        material = self.get_material(material_name)
        return material.yield_stress if material else None

    def validate_stress_limit(
        self,
        material_name: str,
        limit: float,
        safety_factor: float = 1.0,
        application: str = "static"
    ) -> "ValidationResult":
        """
        Check if stress limit is reasonable for material.

        Args:
            material_name: Material name
            limit: Proposed stress limit in MPa
            safety_factor: Applied safety factor (if any)
            application: Application type (static, fatigue, aerospace)

        Returns:
            ValidationResult with status and message
        """
        material = self.get_material(material_name)

        if material is None:
            return ValidationResult(
                valid=True,
                message=f"Material '{material_name}' not found in database. Unable to validate stress limit.",
                severity="info"
            )

        if material.yield_stress is None:
            return ValidationResult(
                valid=True,
                message=f"Material '{material.id}' does not have yield stress defined (e.g., brittle material).",
                severity="info"
            )

        yield_stress = material.yield_stress
        # Scale the proposed limit up by an explicitly applied safety factor.
        effective_limit = limit * safety_factor if safety_factor > 1 else limit

        # Compare against ultimate stress when defined, otherwise yield stress.
        # BUG FIX: the previous form `a > b if b else c` was a conditional
        # expression, so a material without ultimate stress always evaluated
        # truthy (yield_stress is non-None here) and was flagged as an error
        # regardless of the proposed limit.
        failure_stress = material.ultimate_stress or yield_stress
        if effective_limit > failure_stress:
            return ValidationResult(
                valid=False,
                message=f"Stress limit ({limit} MPa) exceeds ultimate stress ({failure_stress} MPa) for {material.id}",
                severity="error",
                suggestion=f"Reduce stress limit to below {failure_stress / 1.5:.0f} MPa"
            )

        if effective_limit > yield_stress:
            return ValidationResult(
                valid=True,  # Warning, not error
                message=f"Stress limit ({limit} MPa) exceeds yield stress ({yield_stress} MPa) for {material.id}. This allows plastic deformation.",
                severity="warning",
                suggestion=f"Consider reducing to {yield_stress / 1.5:.0f} MPa (SF=1.5)"
            )

        # Get recommended safe stress for the application
        # NOTE(review): this branch compares the raw `limit`, not
        # `effective_limit`, against the safe stress — confirm intended.
        safe_stress = material.get_safe_stress(application)
        if safe_stress and limit > safe_stress:
            rec_sf = material.recommended_safety_factors.get(application, 1.5) if material.recommended_safety_factors else 1.5
            return ValidationResult(
                valid=True,
                message=f"Stress limit ({limit} MPa) is {limit/yield_stress*100:.0f}% of yield. Recommended safety factor for {application}: {rec_sf}",
                severity="info",
                suggestion=f"Typical {application} limit: {safe_stress:.0f} MPa"
            )

        return ValidationResult(
            valid=True,
            message=f"Stress limit ({limit} MPa) is acceptable for {material.id} (yield: {yield_stress} MPa)",
            severity="ok"
        )

    def list_materials(self, category: Optional[str] = None) -> List[Material]:
        """List all materials, optionally filtered by category."""
        materials = list(self.materials.values())
        if category:
            materials = [m for m in materials if m.category == category]
        return materials
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ValidationResult:
    """Result of a validation check."""
    valid: bool
    message: str
    severity: str = "ok"  # ok, info, warning, error
    suggestion: Optional[str] = None
    field: Optional[str] = None

    def is_blocking(self) -> bool:
        """Return True when this result must stop the workflow (severity is "error")."""
        return "error" == self.severity
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AntiPattern:
    """Detected anti-pattern."""
    id: str
    name: str
    description: str
    severity: str  # error, warning, info
    fix_suggestion: Optional[str] = None
    auto_fix: Optional[Dict[str, Any]] = None
    acknowledged: bool = False

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (same key order as the field order)."""
        keys = (
            "id",
            "name",
            "description",
            "severity",
            "fix_suggestion",
            "auto_fix",
            "acknowledged",
        )
        return {key: getattr(self, key) for key in keys}
|
||||||
|
|
||||||
|
|
||||||
|
class AntiPatternDetector:
    """
    Detects common optimization setup mistakes.

    Patterns are declarative JSON entries; each carries a "condition" that is
    a small recursive expression tree (and/or/not combinators, comparisons,
    existence and count checks) interpreted by _evaluate_condition against a
    context dict built from the interview state.
    """

    def __init__(self, patterns_path: Optional[Path] = None):
        """
        Initialize anti-pattern detector.

        Args:
            patterns_path: Path to patterns JSON. Uses default if None.
        """
        if patterns_path is None:
            patterns_path = Path(__file__).parent / "schemas" / "anti_patterns.json"

        self.patterns_path = patterns_path
        self.patterns: List[Dict[str, Any]] = []

        self._load_patterns()

    def _load_patterns(self) -> None:
        """Load patterns from JSON file. Silently no-ops if the file is missing."""
        if not self.patterns_path.exists():
            return

        with open(self.patterns_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        self.patterns = data.get("patterns", [])

    def check_all(self, state: "InterviewState", introspection: Optional[Dict[str, Any]] = None) -> List[AntiPattern]:
        """
        Run all anti-pattern checks.

        Args:
            state: Current interview state
            introspection: Optional introspection results

        Returns:
            List of detected anti-patterns
        """
        detected = []
        context = self._build_context(state, introspection or {})

        for pattern in self.patterns:
            # A pattern without a "condition" never fires (empty dict -> False).
            if self._evaluate_condition(pattern.get("condition", {}), context):
                detected.append(AntiPattern(
                    id=pattern["id"],
                    name=pattern["name"],
                    description=pattern["description"],
                    severity=pattern["severity"],
                    fix_suggestion=pattern.get("fix_suggestion"),
                    auto_fix=pattern.get("auto_fix"),
                ))

        return detected

    def _build_context(self, state: "InterviewState", introspection: Dict[str, Any]) -> Dict[str, Any]:
        """Build evaluation context from state and introspection."""
        answers = state.answers

        # Extract objectives as list of goal values
        objectives_list = []
        for obj in answers.get("objectives", []):
            if isinstance(obj, dict):
                objectives_list.append(obj.get("goal", ""))
            else:
                objectives_list.append(str(obj))

        # Add secondary objectives if present ("none" is a sentinel meaning no secondary)
        for obj in answers.get("objectives_secondary", []):
            if obj != "none":
                objectives_list.append(obj)

        return {
            "objectives": objectives_list,
            "constraints": answers.get("constraints", {}),
            "design_variables": answers.get("design_variables", []),
            "design_variable_count": len(answers.get("design_variables", [])),
            "analysis_types": answers.get("analysis_types", []),
            "solve_all_solutions": answers.get("solve_all_solutions", True),
            "n_trials": answers.get("n_trials", 100),
            "introspection": introspection,
            "material": introspection.get("material"),
            "baseline_violations": state.get_answer("baseline_violations"),
        }

    def _evaluate_condition(self, condition: Dict[str, Any], context: Dict[str, Any]) -> bool:
        """Evaluate a pattern condition against context.

        Recursively interprets the condition tree. Unknown or empty condition
        types evaluate to False (fail-closed: no false positives from typos
        in pattern JSON, but silently ignores them too).
        """
        if not condition:
            return False

        cond_type = condition.get("type", "")

        # Boolean combinators recurse over nested "conditions"/"condition".
        if cond_type == "and":
            return all(
                self._evaluate_condition(c, context)
                for c in condition.get("conditions", [])
            )

        elif cond_type == "or":
            return any(
                self._evaluate_condition(c, context)
                for c in condition.get("conditions", [])
            )

        elif cond_type == "not":
            inner = condition.get("condition", {})
            return not self._evaluate_condition(inner, context)

        # Membership checks only apply to list-valued fields.
        elif cond_type == "contains":
            field_value = self._get_field(context, condition.get("field", ""))
            target = condition.get("value")
            if isinstance(field_value, list):
                return target in field_value
            return False

        elif cond_type == "not_contains":
            field_value = self._get_field(context, condition.get("field", ""))
            target = condition.get("value")
            if isinstance(field_value, list):
                return target not in field_value
            # Non-list fields vacuously satisfy "not_contains".
            return True

        elif cond_type == "equals":
            field_value = self._get_field(context, condition.get("field", ""))
            return field_value == condition.get("value")

        elif cond_type == "empty":
            field_value = self._get_field(context, condition.get("field", ""))
            if field_value is None:
                return True
            if isinstance(field_value, (list, dict, str)):
                return len(field_value) == 0
            # Scalars (numbers, bools) are never considered "empty".
            return False

        elif cond_type == "exists":
            field_value = self._get_field(context, condition.get("field", ""))
            return field_value is not None

        elif cond_type == "not_exists":
            field_value = self._get_field(context, condition.get("field", ""))
            return field_value is None

        elif cond_type == "greater_than":
            field_value = self._get_field(context, condition.get("field", ""))
            compare = condition.get("value")

            # Handle compare_to (field reference). A dict form
            # {"type": "multiply", "field": ..., "value": k} compares against
            # another field scaled by k; a plain string is a field path.
            if "compare_to" in condition:
                compare_ref = condition["compare_to"]
                if isinstance(compare_ref, dict):
                    # Dynamic calculation
                    if compare_ref.get("type") == "multiply":
                        base_value = self._get_field(context, compare_ref.get("field", ""))
                        if base_value is not None:
                            compare = base_value * compare_ref.get("value", 1)
                else:
                    compare = self._get_field(context, compare_ref)

            if field_value is not None and compare is not None:
                try:
                    return float(field_value) > float(compare)
                except (ValueError, TypeError):
                    return False
            return False

        elif cond_type == "less_than":
            field_value = self._get_field(context, condition.get("field", ""))
            compare = condition.get("value")

            # Same compare_to resolution as "greater_than".
            if "compare_to" in condition:
                compare_ref = condition["compare_to"]
                if isinstance(compare_ref, dict):
                    if compare_ref.get("type") == "multiply":
                        base_value = self._get_field(context, compare_ref.get("field", ""))
                        if base_value is not None:
                            compare = base_value * compare_ref.get("value", 1)
                else:
                    compare = self._get_field(context, compare_ref)

            if field_value is not None and compare is not None:
                try:
                    return float(field_value) < float(compare)
                except (ValueError, TypeError):
                    return False
            return False

        elif cond_type == "count_greater_than":
            field_value = self._get_field(context, condition.get("field", ""))
            if isinstance(field_value, (list, dict)):
                return len(field_value) > condition.get("value", 0)
            return False

        elif cond_type == "count_equals":
            field_value = self._get_field(context, condition.get("field", ""))
            if isinstance(field_value, (list, dict)):
                return len(field_value) == condition.get("value", 0)
            return False

        elif cond_type == "any_of":
            # Check if any item in array matches a condition; each dict item
            # is exposed to the nested check as context key "item".
            field_value = self._get_field(context, condition.get("field", ""))
            if not isinstance(field_value, list):
                return False
            check = condition.get("check", {})
            for item in field_value:
                if isinstance(item, dict):
                    item_context = {**context, "item": item}
                    if self._evaluate_condition(check, item_context):
                        return True
            return False

        elif cond_type == "ratio_greater_than":
            # For bounds checking: "field" is a two-element list of keys read
            # from the current "item" (so this is meant to nest under any_of).
            fields = condition.get("field", [])
            if len(fields) == 2:
                val1 = self._get_field(context, f"item.{fields[0]}")
                val2 = self._get_field(context, f"item.{fields[1]}")
                # NOTE: falsy check means a zero val1/val2 short-circuits to False.
                if val1 and val2 and val2 != 0:
                    try:
                        return float(val1) / float(val2) > condition.get("value", 1)
                    except (ValueError, TypeError):
                        return False
            return False

        # Unknown condition type: fail closed.
        return False

    def _get_field(self, context: Dict[str, Any], field_path: str) -> Any:
        """Get a field value from context using dot notation.

        Returns None if any segment is missing or a non-dict is traversed.
        """
        if not field_path:
            return None

        parts = field_path.split(".")
        current = context

        for part in parts:
            if current is None:
                return None
            if isinstance(current, dict):
                current = current.get(part)
            else:
                return None

        return current
|
||||||
|
|
||||||
|
|
||||||
|
class EngineeringValidator:
    """
    Main engineering validator.

    Combines materials database and anti-pattern detection with
    additional validation logic (constraint sanity checks, design-variable
    bounds checks, and bound suggestions).
    """

    def __init__(self):
        """Initialize validator with materials DB and anti-pattern detector."""
        self.materials_db = MaterialsDatabase()
        self.anti_patterns = AntiPatternDetector()

    def validate_constraint(
        self,
        constraint_type: str,
        value: float,
        material: Optional[str] = None,
        baseline: Optional[float] = None
    ) -> ValidationResult:
        """
        Validate a constraint value against engineering limits.

        Args:
            constraint_type: Type of constraint (stress, displacement, frequency)
            value: Constraint value
            material: Optional material name for property lookups
            baseline: Optional baseline value for feasibility check

        Returns:
            ValidationResult
        """
        # NOTE(review): stress constraints with a known material return early
        # and skip the baseline feasibility check below — confirm intended.
        if constraint_type == "stress" and material:
            return self.materials_db.validate_stress_limit(material, value)

        # Check against baseline if available
        if baseline is not None:
            if constraint_type in ["stress", "displacement"]:
                # Max constraint - baseline should be under limit
                if baseline > value:
                    return ValidationResult(
                        valid=True,
                        message=f"Baseline ({baseline:.2f}) exceeds limit ({value}). Optimization starts infeasible.",
                        severity="warning",
                        suggestion="Consider relaxing the constraint or improving the baseline design"
                    )
            elif constraint_type == "frequency":
                # Min constraint - baseline should be above limit
                if baseline < value:
                    return ValidationResult(
                        valid=True,
                        message=f"Baseline frequency ({baseline:.2f} Hz) is below limit ({value} Hz). Optimization starts infeasible.",
                        severity="warning",
                        suggestion="Consider relaxing the constraint"
                    )

        # Default: accept the constraint as-is.
        return ValidationResult(
            valid=True,
            message=f"Constraint {constraint_type} = {value} accepted",
            severity="ok"
        )

    def validate_bounds(
        self,
        parameter: str,
        min_value: float,
        max_value: float,
        current_value: Optional[float] = None
    ) -> ValidationResult:
        """
        Validate design variable bounds.

        Args:
            parameter: Parameter name
            min_value: Lower bound
            max_value: Upper bound
            current_value: Current/nominal value

        Returns:
            ValidationResult
        """
        # Inverted or degenerate bounds are the only hard error here.
        if min_value >= max_value:
            return ValidationResult(
                valid=False,
                message=f"Invalid bounds for {parameter}: min ({min_value}) >= max ({max_value})",
                severity="error",
                field=parameter
            )

        # Check bounds width (ratio check only meaningful for positive min)
        if min_value > 0:
            ratio = max_value / min_value
            if ratio > 10:
                return ValidationResult(
                    valid=True,
                    message=f"Wide bounds for {parameter}: ratio {ratio:.1f}x may slow convergence",
                    severity="warning",
                    suggestion=f"Consider narrowing to {min_value:.2f} - {min_value * 5:.2f}",
                    field=parameter
                )

        # Check if current value is within bounds
        if current_value is not None:
            if current_value < min_value or current_value > max_value:
                return ValidationResult(
                    valid=True,
                    message=f"Current value ({current_value}) for {parameter} is outside bounds [{min_value}, {max_value}]",
                    severity="warning",
                    suggestion="Adjust bounds to include current value or update nominal design",
                    field=parameter
                )

        return ValidationResult(
            valid=True,
            message=f"Bounds for {parameter} are valid",
            severity="ok",
            field=parameter
        )

    def suggest_bounds(
        self,
        parameter: str,
        current_value: float,
        context: Optional[Dict[str, Any]] = None
    ) -> Tuple[float, float]:
        """
        Suggest reasonable bounds for a design variable.

        Args:
            parameter: Parameter name
            current_value: Current value
            context: Optional context (material, application, etc.)
                NOTE(review): currently unused — reserved for future heuristics.

        Returns:
            Tuple of (suggested_min, suggested_max)
        """
        # Default: +/- 50% of current value
        if current_value > 0:
            suggested_min = current_value * 0.5
            suggested_max = current_value * 1.5
        elif current_value < 0:
            # Negative values: widen toward more-negative, shrink toward zero.
            suggested_min = current_value * 1.5
            suggested_max = current_value * 0.5
        else:
            suggested_min = -1.0
            suggested_max = 1.0

        # Adjust based on parameter name heuristics
        # (assumes length-like parameters are in mm — TODO confirm units)
        name_lower = parameter.lower()

        if "thickness" in name_lower:
            # Thickness should stay positive with reasonable manufacturing limits
            suggested_min = max(0.5, current_value * 0.3)  # Min 0.5mm
            suggested_max = current_value * 2.0

        elif "radius" in name_lower or "fillet" in name_lower:
            # Radii should stay positive
            suggested_min = max(0.1, current_value * 0.2)
            suggested_max = current_value * 3.0

        elif "angle" in name_lower:
            # Angles often have natural limits
            suggested_min = max(-90, current_value - 30)
            suggested_max = min(90, current_value + 30)

        return (round(suggested_min, 3), round(suggested_max, 3))

    def detect_anti_patterns(
        self,
        state: "InterviewState",
        introspection: Optional[Dict[str, Any]] = None
    ) -> List[AntiPattern]:
        """
        Detect common optimization anti-patterns.

        Args:
            state: Current interview state
            introspection: Optional introspection results

        Returns:
            List of detected anti-patterns
        """
        # Thin delegation to the pattern-driven detector.
        return self.anti_patterns.check_all(state, introspection or {})

    def validate_all(
        self,
        state: "InterviewState",
        introspection: Optional[Dict[str, Any]] = None
    ) -> List[ValidationResult]:
        """
        Run all validations on interview state.

        Args:
            state: Current interview state
            introspection: Optional introspection results

        Returns:
            List of all validation results
        """
        results = []
        answers = state.answers
        intro = introspection or {}

        # Validate constraints
        if "max_stress" in answers.get("constraints", {}):
            # NOTE(review): assumes intro["material"], when present, is a dict
            # with a "name" key — a None value here would raise. Confirm shape.
            material = intro.get("material", {}).get("name")
            result = self.validate_constraint(
                "stress",
                answers["constraints"]["max_stress"],
                material=material,
                baseline=intro.get("baseline_stress")
            )
            results.append(result)

        if "max_displacement" in answers.get("constraints", {}):
            result = self.validate_constraint(
                "displacement",
                answers["constraints"]["max_displacement"],
                baseline=intro.get("baseline_displacement")
            )
            results.append(result)

        if "min_frequency" in answers.get("constraints", {}):
            result = self.validate_constraint(
                "frequency",
                answers["constraints"]["min_frequency"],
                baseline=intro.get("baseline_frequency")
            )
            results.append(result)

        # Validate design variable bounds (non-dict entries are skipped)
        for dv in answers.get("design_variables", []):
            if isinstance(dv, dict):
                result = self.validate_bounds(
                    dv.get("parameter", "unknown"),
                    dv.get("min_value", 0),
                    dv.get("max_value", 1),
                    dv.get("current_value")
                )
                results.append(result)

        # Check anti-patterns and surface each as a ValidationResult
        anti_patterns = self.detect_anti_patterns(state, intro)
        for ap in anti_patterns:
            results.append(ValidationResult(
                valid=ap.severity != "error",
                message=f"[{ap.name}] {ap.description}",
                severity=ap.severity,
                suggestion=ap.fix_suggestion
            ))

        return results

    def has_blocking_issues(
        self,
        state: "InterviewState",
        introspection: Optional[Dict[str, Any]] = None
    ) -> Tuple[bool, List[str]]:
        """
        Check if there are any blocking issues.

        Returns:
            Tuple of (has_blocking, list_of_blocking_messages)
        """
        results = self.validate_all(state, introspection)
        blocking = [r.message for r in results if r.is_blocking()]
        return len(blocking) > 0, blocking
|
||||||
|
|
||||||
|
|
||||||
|
# Import for type hints
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .interview_state import InterviewState
|
||||||
648
optimization_engine/interview/interview_intelligence.py
Normal file
648
optimization_engine/interview/interview_intelligence.py
Normal file
@@ -0,0 +1,648 @@
|
|||||||
|
"""
|
||||||
|
Interview Intelligence
|
||||||
|
|
||||||
|
Smart features for the interview process:
|
||||||
|
- ExtractorMapper: Maps goals to appropriate extractors
|
||||||
|
- InterviewIntelligence: Auto-detection, inference, complexity determination
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Any, Optional, Literal, Tuple
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ExtractorSelection:
    """Result of mapping a goal to an extractor."""
    # Extractor library identifier (e.g. "E4"); "custom" for user-defined.
    extractor_id: str
    # Human-readable extractor name.
    extractor_name: str
    goal_type: str  # minimize, maximize, target
    # Extractor-specific parameters (e.g. mode_number, stress_type).
    params: Dict[str, Any] = field(default_factory=dict)
    # Optional fallback extractor id to try if the primary is unavailable.
    fallback: Optional[str] = None
    # Mapping confidence in [0, 1]; 1.0 means an exact goal match.
    confidence: float = 1.0
    # Free-form guidance for the user about this selection.
    notes: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
class ExtractorMapper:
|
||||||
|
"""
|
||||||
|
Maps physics goals to appropriate extractors.
|
||||||
|
|
||||||
|
Uses the Atomizer extractor library (SYS_12) to select
|
||||||
|
the right extractor for each objective or constraint.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Goal to extractor mapping
|
||||||
|
GOAL_MAP = {
|
||||||
|
# Mass objectives
|
||||||
|
"minimize_mass": ExtractorSelection(
|
||||||
|
extractor_id="E4",
|
||||||
|
extractor_name="BDF Mass Extraction",
|
||||||
|
goal_type="minimize",
|
||||||
|
fallback="E5",
|
||||||
|
notes="Uses BDF parsing for accurate mass. Falls back to NX expression."
|
||||||
|
),
|
||||||
|
"minimize_weight": ExtractorSelection(
|
||||||
|
extractor_id="E4",
|
||||||
|
extractor_name="BDF Mass Extraction",
|
||||||
|
goal_type="minimize",
|
||||||
|
fallback="E5"
|
||||||
|
),
|
||||||
|
|
||||||
|
# Displacement/stiffness objectives
|
||||||
|
"minimize_displacement": ExtractorSelection(
|
||||||
|
extractor_id="E1",
|
||||||
|
extractor_name="Displacement Extraction",
|
||||||
|
goal_type="minimize",
|
||||||
|
params={"component": "magnitude", "node_id": "auto"},
|
||||||
|
notes="Extracts displacement magnitude. Node ID auto-detected from max."
|
||||||
|
),
|
||||||
|
"maximize_stiffness": ExtractorSelection(
|
||||||
|
extractor_id="E1",
|
||||||
|
extractor_name="Displacement Extraction",
|
||||||
|
goal_type="minimize", # Stiffness = 1/displacement
|
||||||
|
params={"component": "magnitude", "node_id": "auto"},
|
||||||
|
notes="Stiffness maximization = displacement minimization"
|
||||||
|
),
|
||||||
|
|
||||||
|
# Frequency objectives
|
||||||
|
"maximize_frequency": ExtractorSelection(
|
||||||
|
extractor_id="E2",
|
||||||
|
extractor_name="Frequency Extraction",
|
||||||
|
goal_type="maximize",
|
||||||
|
params={"mode_number": 1},
|
||||||
|
notes="First natural frequency. Mode number adjustable."
|
||||||
|
),
|
||||||
|
"target_frequency": ExtractorSelection(
|
||||||
|
extractor_id="E2",
|
||||||
|
extractor_name="Frequency Extraction",
|
||||||
|
goal_type="target",
|
||||||
|
params={"mode_number": 1, "target": None},
|
||||||
|
notes="Target a specific frequency value."
|
||||||
|
),
|
||||||
|
|
||||||
|
# Stress objectives
|
||||||
|
"minimize_stress": ExtractorSelection(
|
||||||
|
extractor_id="E3",
|
||||||
|
extractor_name="Solid Stress Extraction",
|
||||||
|
goal_type="minimize",
|
||||||
|
params={"element_type": "auto", "stress_type": "von_mises"},
|
||||||
|
notes="Von Mises stress. Element type auto-detected."
|
||||||
|
),
|
||||||
|
|
||||||
|
# Optical objectives
|
||||||
|
"minimize_wavefront_error": ExtractorSelection(
|
||||||
|
extractor_id="E8",
|
||||||
|
extractor_name="Zernike Wavefront Fitting",
|
||||||
|
goal_type="minimize",
|
||||||
|
params={"n_terms": 15, "radius": "auto"},
|
||||||
|
notes="Fits surface to Zernike polynomials. Optical applications."
|
||||||
|
),
|
||||||
|
|
||||||
|
# Custom
|
||||||
|
"custom": ExtractorSelection(
|
||||||
|
extractor_id="custom",
|
||||||
|
extractor_name="Custom Extractor",
|
||||||
|
goal_type="custom",
|
||||||
|
confidence=0.5,
|
||||||
|
notes="User will define custom extraction logic."
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
# Constraint type to extractor mapping
|
||||||
|
CONSTRAINT_MAP = {
|
||||||
|
"stress": ExtractorSelection(
|
||||||
|
extractor_id="E3",
|
||||||
|
extractor_name="Solid Stress Extraction",
|
||||||
|
goal_type="max",
|
||||||
|
params={"stress_type": "von_mises"}
|
||||||
|
),
|
||||||
|
"max_stress": ExtractorSelection(
|
||||||
|
extractor_id="E3",
|
||||||
|
extractor_name="Solid Stress Extraction",
|
||||||
|
goal_type="max",
|
||||||
|
params={"stress_type": "von_mises"}
|
||||||
|
),
|
||||||
|
"displacement": ExtractorSelection(
|
||||||
|
extractor_id="E1",
|
||||||
|
extractor_name="Displacement Extraction",
|
||||||
|
goal_type="max",
|
||||||
|
params={"component": "magnitude"}
|
||||||
|
),
|
||||||
|
"max_displacement": ExtractorSelection(
|
||||||
|
extractor_id="E1",
|
||||||
|
extractor_name="Displacement Extraction",
|
||||||
|
goal_type="max",
|
||||||
|
params={"component": "magnitude"}
|
||||||
|
),
|
||||||
|
"frequency": ExtractorSelection(
|
||||||
|
extractor_id="E2",
|
||||||
|
extractor_name="Frequency Extraction",
|
||||||
|
goal_type="min",
|
||||||
|
params={"mode_number": 1}
|
||||||
|
),
|
||||||
|
"min_frequency": ExtractorSelection(
|
||||||
|
extractor_id="E2",
|
||||||
|
extractor_name="Frequency Extraction",
|
||||||
|
goal_type="min",
|
||||||
|
params={"mode_number": 1}
|
||||||
|
),
|
||||||
|
"mass": ExtractorSelection(
|
||||||
|
extractor_id="E4",
|
||||||
|
extractor_name="BDF Mass Extraction",
|
||||||
|
goal_type="max"
|
||||||
|
),
|
||||||
|
"max_mass": ExtractorSelection(
|
||||||
|
extractor_id="E4",
|
||||||
|
extractor_name="BDF Mass Extraction",
|
||||||
|
goal_type="max"
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
def map_goal_to_extractor(
|
||||||
|
self,
|
||||||
|
goal: str,
|
||||||
|
introspection: Optional[Dict[str, Any]] = None
|
||||||
|
) -> ExtractorSelection:
|
||||||
|
"""
|
||||||
|
Map a physics goal to the appropriate extractor.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
goal: Goal identifier (e.g., "minimize_mass")
|
||||||
|
introspection: Optional introspection results for auto-detection
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ExtractorSelection with extractor details
|
||||||
|
"""
|
||||||
|
goal_lower = goal.lower().strip()
|
||||||
|
|
||||||
|
# Direct match
|
||||||
|
if goal_lower in self.GOAL_MAP:
|
||||||
|
selection = self.GOAL_MAP[goal_lower]
|
||||||
|
|
||||||
|
# Auto-detect parameters if introspection available
|
||||||
|
if introspection:
|
||||||
|
selection = self._refine_selection(selection, introspection)
|
||||||
|
|
||||||
|
return selection
|
||||||
|
|
||||||
|
# Fuzzy matching for common variations
|
||||||
|
for key, selection in self.GOAL_MAP.items():
|
||||||
|
if key.replace("_", " ") in goal_lower or goal_lower in key:
|
||||||
|
return selection
|
||||||
|
|
||||||
|
# Default to custom
|
||||||
|
return self.GOAL_MAP["custom"]
|
||||||
|
|
||||||
|
def map_constraint_to_extractor(
|
||||||
|
self,
|
||||||
|
constraint_type: str,
|
||||||
|
introspection: Optional[Dict[str, Any]] = None
|
||||||
|
) -> ExtractorSelection:
|
||||||
|
"""
|
||||||
|
Map a constraint type to the appropriate extractor.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
constraint_type: Constraint type (e.g., "stress", "displacement")
|
||||||
|
introspection: Optional introspection results
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ExtractorSelection with extractor details
|
||||||
|
"""
|
||||||
|
type_lower = constraint_type.lower().strip()
|
||||||
|
|
||||||
|
if type_lower in self.CONSTRAINT_MAP:
|
||||||
|
selection = self.CONSTRAINT_MAP[type_lower]
|
||||||
|
|
||||||
|
if introspection:
|
||||||
|
selection = self._refine_selection(selection, introspection)
|
||||||
|
|
||||||
|
return selection
|
||||||
|
|
||||||
|
# Try to infer from name
|
||||||
|
if "stress" in type_lower:
|
||||||
|
return self.CONSTRAINT_MAP["stress"]
|
||||||
|
if "disp" in type_lower or "deflect" in type_lower:
|
||||||
|
return self.CONSTRAINT_MAP["displacement"]
|
||||||
|
if "freq" in type_lower or "modal" in type_lower:
|
||||||
|
return self.CONSTRAINT_MAP["frequency"]
|
||||||
|
if "mass" in type_lower or "weight" in type_lower:
|
||||||
|
return self.CONSTRAINT_MAP["mass"]
|
||||||
|
|
||||||
|
return ExtractorSelection(
|
||||||
|
extractor_id="custom",
|
||||||
|
extractor_name="Custom Constraint",
|
||||||
|
goal_type="constraint",
|
||||||
|
confidence=0.5
|
||||||
|
)
|
||||||
|
|
||||||
|
def _refine_selection(
|
||||||
|
self,
|
||||||
|
selection: ExtractorSelection,
|
||||||
|
introspection: Dict[str, Any]
|
||||||
|
) -> ExtractorSelection:
|
||||||
|
"""Refine extractor selection based on introspection."""
|
||||||
|
import copy
|
||||||
|
refined = copy.deepcopy(selection)
|
||||||
|
|
||||||
|
# Auto-detect element type for stress extraction
|
||||||
|
if refined.extractor_id == "E3" and refined.params.get("element_type") == "auto":
|
||||||
|
element_types = introspection.get("element_types", [])
|
||||||
|
if "solid" in element_types or any("TET" in e or "HEX" in e for e in element_types):
|
||||||
|
refined.params["element_type"] = "solid"
|
||||||
|
elif "shell" in element_types or any("QUAD" in e or "TRI" in e for e in element_types):
|
||||||
|
refined.params["element_type"] = "shell"
|
||||||
|
refined.extractor_id = "E3_shell" # Use shell stress extractor
|
||||||
|
|
||||||
|
# Auto-detect node for displacement
|
||||||
|
if refined.extractor_id == "E1" and refined.params.get("node_id") == "auto":
|
||||||
|
# Use max displacement node from baseline if available
|
||||||
|
if "max_disp_node" in introspection:
|
||||||
|
refined.params["node_id"] = introspection["max_disp_node"]
|
||||||
|
|
||||||
|
return refined
|
||||||
|
|
||||||
|
def get_extractor_summary(self, selections: List[ExtractorSelection]) -> str:
|
||||||
|
"""Generate a summary of selected extractors."""
|
||||||
|
lines = ["**Selected Extractors:**", ""]
|
||||||
|
|
||||||
|
for sel in selections:
|
||||||
|
params_str = ""
|
||||||
|
if sel.params:
|
||||||
|
params_str = " (" + ", ".join(f"{k}={v}" for k, v in sel.params.items()) + ")"
|
||||||
|
|
||||||
|
lines.append(f"- **{sel.extractor_id}**: {sel.extractor_name}{params_str}")
|
||||||
|
if sel.notes:
|
||||||
|
lines.append(f" > {sel.notes}")
|
||||||
|
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class StudyTypeInference:
    """Result of inferring the study type from interview context.

    Produced by InterviewIntelligence.infer_study_type; carries the
    inferred type, the protocol to execute it with, and the evidence
    behind the decision.
    """
    study_type: str  # single_objective, multi_objective, parametric
    protocol: str  # protocol_10_single, protocol_11_multi
    # Heuristic confidence in the inference (0.0-1.0).
    confidence: float
    # Human-readable evidence for why this type was chosen.
    reasons: List[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
class InterviewIntelligence:
|
||||||
|
"""
|
||||||
|
Smart features for the interview process.
|
||||||
|
|
||||||
|
Provides:
|
||||||
|
- Study type inference from context
|
||||||
|
- Auto-selection of extractors
|
||||||
|
- History-based suggestions
|
||||||
|
- Complexity determination
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
"""Initialize intelligence module."""
|
||||||
|
self.extractor_mapper = ExtractorMapper()
|
||||||
|
|
||||||
|
def infer_study_type(
|
||||||
|
self,
|
||||||
|
study_name: str,
|
||||||
|
user_description: str,
|
||||||
|
introspection: Optional[Dict[str, Any]] = None
|
||||||
|
) -> StudyTypeInference:
|
||||||
|
"""
|
||||||
|
Infer study type from available context.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
study_name: Study name (may contain hints)
|
||||||
|
user_description: User's problem description
|
||||||
|
introspection: Optional introspection results
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
StudyTypeInference with type and protocol
|
||||||
|
"""
|
||||||
|
reasons = []
|
||||||
|
score_multi = 0
|
||||||
|
score_single = 0
|
||||||
|
|
||||||
|
text = f"{study_name} {user_description}".lower()
|
||||||
|
|
||||||
|
# Check for multi-objective keywords
|
||||||
|
if any(kw in text for kw in ["pareto", "trade-off", "tradeoff", "multi-objective", "multiobjective"]):
|
||||||
|
score_multi += 2
|
||||||
|
reasons.append("Multi-objective keywords detected")
|
||||||
|
|
||||||
|
if any(kw in text for kw in ["versus", " vs ", "and minimize", "and maximize", "balance"]):
|
||||||
|
score_multi += 1
|
||||||
|
reasons.append("Conflicting goals language detected")
|
||||||
|
|
||||||
|
# Check for single-objective keywords
|
||||||
|
if any(kw in text for kw in ["minimize", "maximize", "reduce", "increase"]):
|
||||||
|
# Count occurrences
|
||||||
|
count = sum(1 for kw in ["minimize", "maximize", "reduce", "increase"] if kw in text)
|
||||||
|
if count == 1:
|
||||||
|
score_single += 1
|
||||||
|
reasons.append("Single optimization goal language")
|
||||||
|
else:
|
||||||
|
score_multi += 1
|
||||||
|
reasons.append("Multiple optimization verbs detected")
|
||||||
|
|
||||||
|
# Default to single objective if no strong signals
|
||||||
|
if score_multi > score_single:
|
||||||
|
return StudyTypeInference(
|
||||||
|
study_type="multi_objective",
|
||||||
|
protocol="protocol_11_multi",
|
||||||
|
confidence=min(1.0, 0.5 + score_multi * 0.2),
|
||||||
|
reasons=reasons
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return StudyTypeInference(
|
||||||
|
study_type="single_objective",
|
||||||
|
protocol="protocol_10_single",
|
||||||
|
confidence=min(1.0, 0.5 + score_single * 0.2),
|
||||||
|
reasons=reasons if reasons else ["Default to single-objective"]
|
||||||
|
)
|
||||||
|
|
||||||
|
def auto_select_extractors(
|
||||||
|
self,
|
||||||
|
objectives: List[Dict[str, Any]],
|
||||||
|
constraints: List[Dict[str, Any]],
|
||||||
|
introspection: Optional[Dict[str, Any]] = None
|
||||||
|
) -> Dict[str, ExtractorSelection]:
|
||||||
|
"""
|
||||||
|
Automatically select appropriate extractors.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
objectives: List of objective definitions
|
||||||
|
constraints: List of constraint definitions
|
||||||
|
introspection: Optional introspection results
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict mapping objective/constraint names to ExtractorSelection
|
||||||
|
"""
|
||||||
|
selections = {}
|
||||||
|
|
||||||
|
# Map objectives
|
||||||
|
for i, obj in enumerate(objectives):
|
||||||
|
goal = obj.get("goal", "") if isinstance(obj, dict) else str(obj)
|
||||||
|
name = obj.get("name", f"objective_{i}") if isinstance(obj, dict) else f"objective_{i}"
|
||||||
|
|
||||||
|
selection = self.extractor_mapper.map_goal_to_extractor(goal, introspection)
|
||||||
|
selections[name] = selection
|
||||||
|
|
||||||
|
# Map constraints
|
||||||
|
for i, con in enumerate(constraints):
|
||||||
|
con_type = con.get("type", "") if isinstance(con, dict) else str(con)
|
||||||
|
name = con.get("name", f"constraint_{i}") if isinstance(con, dict) else f"constraint_{i}"
|
||||||
|
|
||||||
|
selection = self.extractor_mapper.map_constraint_to_extractor(con_type, introspection)
|
||||||
|
selections[name] = selection
|
||||||
|
|
||||||
|
return selections
|
||||||
|
|
||||||
|
def determine_complexity(
|
||||||
|
self,
|
||||||
|
state: "InterviewState",
|
||||||
|
introspection: Optional[Dict[str, Any]] = None
|
||||||
|
) -> Literal["simple", "moderate", "complex"]:
|
||||||
|
"""
|
||||||
|
Determine study complexity for adaptive questioning.
|
||||||
|
|
||||||
|
Based on:
|
||||||
|
- Number of objectives
|
||||||
|
- Number of design variables
|
||||||
|
- Analysis complexity
|
||||||
|
- Custom components
|
||||||
|
|
||||||
|
Args:
|
||||||
|
state: Current interview state
|
||||||
|
introspection: Optional introspection results
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Complexity level
|
||||||
|
"""
|
||||||
|
score = 0
|
||||||
|
answers = state.answers
|
||||||
|
|
||||||
|
# Objectives
|
||||||
|
n_obj = len(answers.get("objectives", []))
|
||||||
|
secondary = answers.get("objectives_secondary", [])
|
||||||
|
if "none" not in secondary:
|
||||||
|
n_obj += len(secondary)
|
||||||
|
|
||||||
|
if n_obj == 1:
|
||||||
|
score += 0
|
||||||
|
elif n_obj == 2:
|
||||||
|
score += 1
|
||||||
|
else:
|
||||||
|
score += 2
|
||||||
|
|
||||||
|
# Design variables
|
||||||
|
n_dvs = len(answers.get("design_variables", []))
|
||||||
|
if n_dvs <= 3:
|
||||||
|
score += 0
|
||||||
|
elif n_dvs <= 6:
|
||||||
|
score += 1
|
||||||
|
else:
|
||||||
|
score += 2
|
||||||
|
|
||||||
|
# Analysis types
|
||||||
|
analysis_types = answers.get("analysis_types", [])
|
||||||
|
if len(analysis_types) > 2:
|
||||||
|
score += 2
|
||||||
|
elif len(analysis_types) > 1:
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
if "coupled_thermal_structural" in analysis_types:
|
||||||
|
score += 1
|
||||||
|
if "nonlinear" in analysis_types:
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Introspection complexity
|
||||||
|
if introspection:
|
||||||
|
if introspection.get("multiple_solutions", False):
|
||||||
|
score += 1
|
||||||
|
if len(introspection.get("expressions", [])) > 20:
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Categorize
|
||||||
|
if score <= 2:
|
||||||
|
return "simple"
|
||||||
|
elif score <= 5:
|
||||||
|
return "moderate"
|
||||||
|
else:
|
||||||
|
return "complex"
|
||||||
|
|
||||||
|
def suggest_trial_count(
|
||||||
|
self,
|
||||||
|
n_design_variables: int,
|
||||||
|
n_objectives: int,
|
||||||
|
complexity: str
|
||||||
|
) -> int:
|
||||||
|
"""
|
||||||
|
Suggest appropriate number of trials.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
n_design_variables: Number of design variables
|
||||||
|
n_objectives: Number of objectives
|
||||||
|
complexity: Study complexity level
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Suggested trial count
|
||||||
|
"""
|
||||||
|
# Base: 15 trials per design variable
|
||||||
|
base = n_design_variables * 15
|
||||||
|
|
||||||
|
# Multi-objective needs more
|
||||||
|
if n_objectives > 1:
|
||||||
|
base = int(base * 1.5)
|
||||||
|
|
||||||
|
# Adjust for complexity
|
||||||
|
if complexity == "simple":
|
||||||
|
base = max(50, base)
|
||||||
|
elif complexity == "moderate":
|
||||||
|
base = max(100, base)
|
||||||
|
else:
|
||||||
|
base = max(150, base)
|
||||||
|
|
||||||
|
# Round to nice numbers
|
||||||
|
if base <= 50:
|
||||||
|
return 50
|
||||||
|
elif base <= 75:
|
||||||
|
return 75
|
||||||
|
elif base <= 100:
|
||||||
|
return 100
|
||||||
|
elif base <= 150:
|
||||||
|
return 150
|
||||||
|
elif base <= 200:
|
||||||
|
return 200
|
||||||
|
else:
|
||||||
|
return int((base // 100) * 100)
|
||||||
|
|
||||||
|
def suggest_sampler(
|
||||||
|
self,
|
||||||
|
n_objectives: int,
|
||||||
|
n_design_variables: int
|
||||||
|
) -> str:
|
||||||
|
"""
|
||||||
|
Suggest appropriate sampler/optimizer.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
n_objectives: Number of objectives
|
||||||
|
n_design_variables: Number of design variables
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Sampler name
|
||||||
|
"""
|
||||||
|
if n_objectives > 1:
|
||||||
|
return "NSGA-II" # Multi-objective
|
||||||
|
elif n_design_variables <= 3:
|
||||||
|
return "TPE" # Tree-structured Parzen Estimator
|
||||||
|
elif n_design_variables <= 10:
|
||||||
|
return "CMA-ES" # Covariance Matrix Adaptation
|
||||||
|
else:
|
||||||
|
return "TPE" # TPE handles high dimensions well
|
||||||
|
|
||||||
|
def analyze_design_variable_candidates(
|
||||||
|
self,
|
||||||
|
expressions: List[Dict[str, Any]]
|
||||||
|
) -> List[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Analyze expressions to find design variable candidates.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
expressions: List of expressions from introspection
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Sorted list of candidates with scores
|
||||||
|
"""
|
||||||
|
candidates = []
|
||||||
|
|
||||||
|
# High confidence patterns
|
||||||
|
high_patterns = [
|
||||||
|
(r"thickness", "Thickness parameter"),
|
||||||
|
(r"width", "Width parameter"),
|
||||||
|
(r"height", "Height parameter"),
|
||||||
|
(r"diameter", "Diameter parameter"),
|
||||||
|
(r"radius", "Radius parameter"),
|
||||||
|
(r"length", "Length parameter"),
|
||||||
|
(r"depth", "Depth parameter"),
|
||||||
|
(r"angle", "Angle parameter"),
|
||||||
|
(r"fillet", "Fillet radius"),
|
||||||
|
(r"chamfer", "Chamfer dimension"),
|
||||||
|
(r"rib_", "Rib parameter"),
|
||||||
|
(r"wall_", "Wall parameter"),
|
||||||
|
(r"flange_", "Flange parameter"),
|
||||||
|
]
|
||||||
|
|
||||||
|
# Medium confidence patterns
|
||||||
|
medium_patterns = [
|
||||||
|
(r"dim_", "Dimension parameter"),
|
||||||
|
(r"size_", "Size parameter"),
|
||||||
|
(r"param_", "Named parameter"),
|
||||||
|
(r"^p\d+$", "Numbered parameter"),
|
||||||
|
(r"var_", "Variable"),
|
||||||
|
]
|
||||||
|
|
||||||
|
# Exclusion patterns
|
||||||
|
exclude_patterns = [
|
||||||
|
r"mesh_", r"count_", r"num_", r"material",
|
||||||
|
r"derived_", r"calc_", r"_result$", r"_output$",
|
||||||
|
r"^n\d+$", r"count$"
|
||||||
|
]
|
||||||
|
|
||||||
|
for expr in expressions:
|
||||||
|
name = expr.get("name", "")
|
||||||
|
value = expr.get("value")
|
||||||
|
formula = expr.get("formula", "")
|
||||||
|
|
||||||
|
# Skip non-numeric
|
||||||
|
if not isinstance(value, (int, float)):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Skip formulas (computed values)
|
||||||
|
if formula and formula != str(value):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check exclusions
|
||||||
|
if any(re.search(p, name.lower()) for p in exclude_patterns):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Score
|
||||||
|
score = 0
|
||||||
|
reason = "Named expression"
|
||||||
|
|
||||||
|
for pattern, desc in high_patterns:
|
||||||
|
if re.search(pattern, name.lower()):
|
||||||
|
score = 3
|
||||||
|
reason = desc
|
||||||
|
break
|
||||||
|
|
||||||
|
if score == 0:
|
||||||
|
for pattern, desc in medium_patterns:
|
||||||
|
if re.search(pattern, name.lower()):
|
||||||
|
score = 2
|
||||||
|
reason = desc
|
||||||
|
break
|
||||||
|
|
||||||
|
if score == 0 and len(name) > 2:
|
||||||
|
score = 1
|
||||||
|
|
||||||
|
if score > 0:
|
||||||
|
candidates.append({
|
||||||
|
"name": name,
|
||||||
|
"value": value,
|
||||||
|
"score": score,
|
||||||
|
"reason": reason,
|
||||||
|
"suggested_min": round(value * 0.5, 3) if value > 0 else round(value * 1.5, 3),
|
||||||
|
"suggested_max": round(value * 1.5, 3) if value > 0 else round(value * 0.5, 3),
|
||||||
|
})
|
||||||
|
|
||||||
|
# Sort by score descending
|
||||||
|
candidates.sort(key=lambda x: (-x["score"], x["name"]))
|
||||||
|
|
||||||
|
return candidates
|
||||||
|
|
||||||
|
|
||||||
|
# Import for type hints
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .interview_state import InterviewState
|
||||||
588
optimization_engine/interview/interview_presenter.py
Normal file
588
optimization_engine/interview/interview_presenter.py
Normal file
@@ -0,0 +1,588 @@
|
|||||||
|
"""
|
||||||
|
Interview Presenter
|
||||||
|
|
||||||
|
Abstract presentation layer for different UI modes.
|
||||||
|
Handles:
|
||||||
|
- Formatting questions for display
|
||||||
|
- Parsing user responses
|
||||||
|
- Showing summaries and warnings
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Any, Optional, List, Dict
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .question_engine import Question, QuestionOption
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class PresentedQuestion:
    """A question formatted for presentation."""
    question_id: str  # ID of the underlying Question
    formatted_text: str  # Fully rendered text ready for display
    question_number: int  # Position of this question within the interview
    total_questions: int  # Estimated total number of questions
    category_name: str  # Display name of the question's category
|
||||||
|
|
||||||
|
|
||||||
|
class InterviewPresenter(ABC):
    """
    Abstract base for interview presentation.

    Different presenters handle UI-specific rendering:
    - ClaudePresenter: Markdown for Claude conversation
    - DashboardPresenter: WebSocket events for React UI (future)
    - CLIPresenter: Interactive terminal prompts (future)
    """

    @abstractmethod
    def present_question(
        self,
        question: Question,
        question_number: int,
        total_questions: int,
        category_name: str,
        dynamic_content: Optional[str] = None
    ) -> str:
        """Format a question for display.

        Args:
            question: Question to present
            question_number: Current question number
            total_questions: Estimated total questions
            category_name: Name of the question category
            dynamic_content: Dynamic content to inject (e.g., extractor summary)

        Returns:
            Formatted question string
        """
        ...

    @abstractmethod
    def parse_response(self, response: str, question: Question) -> Any:
        """Parse user's response into a structured value.

        Args:
            response: Raw user response
            question: Question being answered

        Returns:
            Parsed answer value
        """
        ...

    @abstractmethod
    def show_summary(self, blueprint: "StudyBlueprint") -> str:
        """Format interview summary/blueprint for display.

        Args:
            blueprint: Generated study blueprint

        Returns:
            Formatted summary string
        """
        ...

    @abstractmethod
    def show_warning(self, warning: str, severity: str = "warning") -> str:
        """Format a warning message for display.

        Args:
            warning: Warning message
            severity: "error", "warning", or "info"

        Returns:
            Formatted warning string
        """
        ...

    @abstractmethod
    def show_progress(self, current: int, total: int, phase: str) -> str:
        """Format a progress indicator.

        Args:
            current: Current question number
            total: Estimated total questions
            phase: Current phase name

        Returns:
            Formatted progress string
        """
        ...
|
||||||
|
|
||||||
|
|
||||||
|
class ClaudePresenter(InterviewPresenter):
|
||||||
|
"""
|
||||||
|
Presenter for Claude conversation mode (VS Code, Web).
|
||||||
|
|
||||||
|
Formats questions and responses as markdown for natural
|
||||||
|
conversation flow with Claude.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def present_question(
|
||||||
|
self,
|
||||||
|
question: Question,
|
||||||
|
question_number: int,
|
||||||
|
total_questions: int,
|
||||||
|
category_name: str,
|
||||||
|
dynamic_content: Optional[str] = None
|
||||||
|
) -> str:
|
||||||
|
"""Format question as markdown for Claude to present."""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
# Header with progress
|
||||||
|
lines.append(f"### Question {question_number} of ~{total_questions}: {category_name}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Main question text
|
||||||
|
lines.append(question.text)
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Dynamic content if provided
|
||||||
|
if dynamic_content:
|
||||||
|
lines.append(dynamic_content)
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Options for choice questions
|
||||||
|
if question.options and question.question_type in ["choice", "multi_choice"]:
|
||||||
|
for i, opt in enumerate(question.options, 1):
|
||||||
|
desc = f" - {opt.description}" if opt.description else ""
|
||||||
|
lines.append(f"{i}. **{opt.label}**{desc}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Help text
|
||||||
|
if question.help_text:
|
||||||
|
lines.append(f"> {question.help_text}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Engineering guidance
|
||||||
|
if question.engineering_guidance:
|
||||||
|
lines.append(f"> **Tip**: {question.engineering_guidance}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Default value hint
|
||||||
|
if question.default is not None and question.default != []:
|
||||||
|
if isinstance(question.default, list):
|
||||||
|
default_str = ", ".join(str(d) for d in question.default)
|
||||||
|
else:
|
||||||
|
default_str = str(question.default)
|
||||||
|
lines.append(f"*Default: {default_str}*")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Input prompt based on type
|
||||||
|
if question.question_type == "text":
|
||||||
|
lines.append("Please describe:")
|
||||||
|
elif question.question_type == "numeric":
|
||||||
|
units = question.validation.units if question.validation else ""
|
||||||
|
lines.append(f"Enter value{f' ({units})' if units else ''}:")
|
||||||
|
elif question.question_type == "choice":
|
||||||
|
lines.append("Type your choice (number or description):")
|
||||||
|
elif question.question_type == "multi_choice":
|
||||||
|
lines.append("Type your choices (numbers or descriptions, comma-separated):")
|
||||||
|
elif question.question_type == "confirm":
|
||||||
|
lines.append("Type **yes** or **no**:")
|
||||||
|
elif question.question_type == "parameter_select":
|
||||||
|
lines.append("Type parameter names (comma-separated) or select by number:")
|
||||||
|
elif question.question_type == "bounds":
|
||||||
|
lines.append("Enter bounds (e.g., '2 to 10' or 'min 2, max 10'):")
|
||||||
|
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
def parse_response(self, response: str, question: Question) -> Any:
|
||||||
|
"""Parse natural language response into structured answer."""
|
||||||
|
response = response.strip()
|
||||||
|
|
||||||
|
if question.question_type == "text":
|
||||||
|
return response
|
||||||
|
|
||||||
|
elif question.question_type == "numeric":
|
||||||
|
return self._parse_numeric(response, question)
|
||||||
|
|
||||||
|
elif question.question_type == "confirm":
|
||||||
|
return self._parse_confirm(response)
|
||||||
|
|
||||||
|
elif question.question_type == "choice":
|
||||||
|
return self._parse_choice(response, question)
|
||||||
|
|
||||||
|
elif question.question_type == "multi_choice":
|
||||||
|
return self._parse_multi_choice(response, question)
|
||||||
|
|
||||||
|
elif question.question_type == "parameter_select":
|
||||||
|
return self._parse_parameter_select(response, question)
|
||||||
|
|
||||||
|
elif question.question_type == "bounds":
|
||||||
|
return self._parse_bounds(response)
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
|
def _parse_numeric(self, response: str, question: Question) -> Optional[float]:
|
||||||
|
"""Parse numeric response with unit handling."""
|
||||||
|
# Remove common unit suffixes
|
||||||
|
cleaned = re.sub(r'\s*(mm|cm|m|kg|g|MPa|Pa|GPa|Hz|kHz|MHz|°|deg)s?\s*$', '', response, flags=re.I)
|
||||||
|
|
||||||
|
# Extract number
|
||||||
|
match = re.search(r'[-+]?\d*\.?\d+', cleaned)
|
||||||
|
if match:
|
||||||
|
return float(match.group())
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _parse_confirm(self, response: str) -> Optional[bool]:
|
||||||
|
"""Parse yes/no confirmation."""
|
||||||
|
lower = response.lower().strip()
|
||||||
|
|
||||||
|
# Positive responses
|
||||||
|
if lower in ["yes", "y", "true", "1", "ok", "sure", "yep", "yeah", "correct", "confirmed", "confirm", "affirmative"]:
|
||||||
|
return True
|
||||||
|
|
||||||
|
# Negative responses
|
||||||
|
if lower in ["no", "n", "false", "0", "nope", "nah", "cancel", "incorrect", "negative"]:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Try to detect intent from natural language
|
||||||
|
if "yes" in lower or "ok" in lower or "correct" in lower:
|
||||||
|
return True
|
||||||
|
if "no" in lower or "don't" in lower or "not" in lower:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _parse_choice(self, response: str, question: Question) -> Any:
|
||||||
|
"""Parse single choice response."""
|
||||||
|
if not question.options:
|
||||||
|
return response
|
||||||
|
|
||||||
|
# Try by number
|
||||||
|
if response.isdigit():
|
||||||
|
idx = int(response) - 1
|
||||||
|
if 0 <= idx < len(question.options):
|
||||||
|
return question.options[idx].value
|
||||||
|
|
||||||
|
# Try by value (exact match)
|
||||||
|
for opt in question.options:
|
||||||
|
if response.lower() == str(opt.value).lower():
|
||||||
|
return opt.value
|
||||||
|
|
||||||
|
# Try by label (exact match)
|
||||||
|
for opt in question.options:
|
||||||
|
if response.lower() == opt.label.lower():
|
||||||
|
return opt.value
|
||||||
|
|
||||||
|
# Try fuzzy match on label
|
||||||
|
for opt in question.options:
|
||||||
|
if response.lower() in opt.label.lower():
|
||||||
|
return opt.value
|
||||||
|
|
||||||
|
# Return as-is for custom values
|
||||||
|
return response
|
||||||
|
|
||||||
|
def _parse_multi_choice(self, response: str, question: Question) -> List[Any]:
|
||||||
|
"""Parse multiple choice response."""
|
||||||
|
# Split by comma, 'and', or numbers
|
||||||
|
parts = re.split(r'[,&]|\band\b|\s+', response)
|
||||||
|
parts = [p.strip() for p in parts if p.strip()]
|
||||||
|
|
||||||
|
values = []
|
||||||
|
for part in parts:
|
||||||
|
if not part:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Try by number
|
||||||
|
if part.isdigit() and question.options:
|
||||||
|
idx = int(part) - 1
|
||||||
|
if 0 <= idx < len(question.options):
|
||||||
|
value = question.options[idx].value
|
||||||
|
if value not in values:
|
||||||
|
values.append(value)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Try by value/label
|
||||||
|
if question.options:
|
||||||
|
found = False
|
||||||
|
for opt in question.options:
|
||||||
|
if part.lower() == str(opt.value).lower() or part.lower() == opt.label.lower():
|
||||||
|
if opt.value not in values:
|
||||||
|
values.append(opt.value)
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
if part.lower() in opt.label.lower():
|
||||||
|
if opt.value not in values:
|
||||||
|
values.append(opt.value)
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
if found:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Add as custom value
|
||||||
|
if part not in values:
|
||||||
|
values.append(part)
|
||||||
|
|
||||||
|
return values
|
||||||
|
|
||||||
|
def _parse_parameter_select(self, response: str, question: Question) -> List[str]:
|
||||||
|
"""Parse parameter selection response."""
|
||||||
|
# Split by comma, 'and', or numbers
|
||||||
|
parts = re.split(r'[,&]|\band\b', response)
|
||||||
|
parameters = []
|
||||||
|
|
||||||
|
for part in parts:
|
||||||
|
part = part.strip()
|
||||||
|
if not part:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Try by number if we have options
|
||||||
|
if part.isdigit() and question.options:
|
||||||
|
idx = int(part) - 1
|
||||||
|
if 0 <= idx < len(question.options):
|
||||||
|
parameters.append(question.options[idx].value)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Add as parameter name
|
||||||
|
parameters.append(part)
|
||||||
|
|
||||||
|
return parameters
|
||||||
|
|
||||||
|
def _parse_bounds(self, response: str) -> Optional[Dict[str, float]]:
|
||||||
|
"""Parse bounds specification."""
|
||||||
|
bounds = {}
|
||||||
|
|
||||||
|
# Try "min to max" format
|
||||||
|
match = re.search(r'(\d+\.?\d*)\s*(?:to|-)\s*(\d+\.?\d*)', response)
|
||||||
|
if match:
|
||||||
|
bounds["min"] = float(match.group(1))
|
||||||
|
bounds["max"] = float(match.group(2))
|
||||||
|
return bounds
|
||||||
|
|
||||||
|
# Try "min: X, max: Y" format
|
||||||
|
min_match = re.search(r'min[:\s]+(\d+\.?\d*)', response, re.I)
|
||||||
|
max_match = re.search(r'max[:\s]+(\d+\.?\d*)', response, re.I)
|
||||||
|
|
||||||
|
if min_match:
|
||||||
|
bounds["min"] = float(min_match.group(1))
|
||||||
|
if max_match:
|
||||||
|
bounds["max"] = float(max_match.group(1))
|
||||||
|
|
||||||
|
return bounds if bounds else None
|
||||||
|
|
||||||
|
def show_summary(self, blueprint: "StudyBlueprint") -> str:
|
||||||
|
"""Format interview summary/blueprint for display."""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
lines.append(f"## Study Blueprint: {blueprint.study_name}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Description
|
||||||
|
if blueprint.study_description:
|
||||||
|
lines.append(f"**Description**: {blueprint.study_description}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Design Variables
|
||||||
|
lines.append(f"### Design Variables ({len(blueprint.design_variables)})")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("| Parameter | Current | Min | Max | Units |")
|
||||||
|
lines.append("|-----------|---------|-----|-----|-------|")
|
||||||
|
for dv in blueprint.design_variables:
|
||||||
|
lines.append(f"| {dv.parameter} | {dv.current_value} | {dv.min_value} | {dv.max_value} | {dv.units or '-'} |")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Objectives
|
||||||
|
lines.append(f"### Objectives ({len(blueprint.objectives)})")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("| Goal | Extractor | Parameters |")
|
||||||
|
lines.append("|------|-----------|------------|")
|
||||||
|
for obj in blueprint.objectives:
|
||||||
|
params = ", ".join(f"{k}={v}" for k, v in (obj.extractor_params or {}).items()) or "-"
|
||||||
|
lines.append(f"| {obj.goal} | {obj.extractor} | {params} |")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Constraints
|
||||||
|
if blueprint.constraints:
|
||||||
|
lines.append(f"### Constraints ({len(blueprint.constraints)})")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("| Type | Threshold | Extractor |")
|
||||||
|
lines.append("|------|-----------|-----------|")
|
||||||
|
for con in blueprint.constraints:
|
||||||
|
op = "<=" if con.constraint_type == "max" else ">="
|
||||||
|
lines.append(f"| {con.name} | {op} {con.threshold} | {con.extractor} |")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Settings
|
||||||
|
lines.append("### Settings")
|
||||||
|
lines.append("")
|
||||||
|
lines.append(f"- **Protocol**: {blueprint.protocol}")
|
||||||
|
lines.append(f"- **Trials**: {blueprint.n_trials}")
|
||||||
|
lines.append(f"- **Sampler**: {blueprint.sampler}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Warnings
|
||||||
|
if blueprint.warnings_acknowledged:
|
||||||
|
lines.append("### Acknowledged Warnings")
|
||||||
|
lines.append("")
|
||||||
|
for warning in blueprint.warnings_acknowledged:
|
||||||
|
lines.append(f"- {warning}")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
lines.append("---")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("Does this look correct? Reply **yes** to generate the study, or describe what to change.")
|
||||||
|
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
def show_warning(self, warning: str, severity: str = "warning") -> str:
    """Render a warning message as markdown, styled by severity.

    Unknown severities fall back to the "warning" icon but render as a note.
    """
    icon_map = {"error": "X", "warning": "!", "info": "i"}
    marker = icon_map.get(severity, "!")

    # Errors and warnings are bolded; everything else is an italic note.
    if severity == "error":
        return f"\n**[{marker}] ERROR**: {warning}\n"
    if severity == "warning":
        return f"\n**[{marker}] Warning**: {warning}\n"
    return f"\n*[{marker}] Note*: {warning}\n"
|
||||||
|
|
||||||
|
def show_progress(self, current: int, total: int, phase: str) -> str:
    """Render a textual progress bar with percentage and phase label."""
    width = 20
    if total > 0:
        pct = int((current / total) * 100)
        done = int(width * current / total)
    else:
        # Guard against division by zero when the total is unknown/empty.
        pct = 0
        done = 0
    meter = "=" * done + "-" * (width - done)
    return f"**Progress**: [{meter}] {pct}% - {phase}"
|
||||||
|
|
||||||
|
|
||||||
|
class DashboardPresenter(InterviewPresenter):
    """
    Presenter for dashboard UI mode (future).

    Emits WebSocket events for React UI to render. For now every method
    returns the JSON payload that would be sent over the socket.
    """

    def present_question(
        self,
        question: Question,
        question_number: int,
        total_questions: int,
        category_name: str,
        dynamic_content: Optional[str] = None
    ) -> str:
        """Emit WebSocket event for dashboard to render.

        Currently returns the JSON representation instead of emitting.
        """
        import json
        payload = {
            "question_id": question.id,
            "question_number": question_number,
            "total_questions": total_questions,
            "category": category_name,
            "text": question.text,
            "question_type": question.question_type,
            "options": [{"value": o.value, "label": o.label} for o in (question.options or [])],
            "help_text": question.help_text,
            "default": question.default,
            "dynamic_content": dynamic_content,
        }
        return json.dumps({"type": "question", "data": payload})

    def parse_response(self, response: str, question: Question) -> Any:
        """Parse JSON response from dashboard, falling back to free text."""
        import json
        try:
            payload = json.loads(response)
        except json.JSONDecodeError:
            # Not JSON: delegate to the Claude text parser.
            return ClaudePresenter().parse_response(response, question)
        return payload.get("value", response)

    def show_summary(self, blueprint: "StudyBlueprint") -> str:
        """Emit summary event for dashboard."""
        import json
        body = blueprint.to_dict() if hasattr(blueprint, 'to_dict') else str(blueprint)
        return json.dumps({"type": "summary", "data": body})

    def show_warning(self, warning: str, severity: str = "warning") -> str:
        """Emit warning event for dashboard."""
        import json
        return json.dumps({
            "type": "warning",
            "data": {"message": warning, "severity": severity}
        })

    def show_progress(self, current: int, total: int, phase: str) -> str:
        """Emit progress event for dashboard."""
        import json
        return json.dumps({
            "type": "progress",
            "data": {"current": current, "total": total, "phase": phase}
        })
|
||||||
|
|
||||||
|
|
||||||
|
class CLIPresenter(InterviewPresenter):
    """
    Presenter for CLI wizard mode (future).

    Interactive terminal prompts using Rich/Questionary. Output is plain
    text suitable for a terminal rather than markdown.
    """

    def present_question(
        self,
        question: Question,
        question_number: int,
        total_questions: int,
        category_name: str,
        dynamic_content: Optional[str] = None
    ) -> str:
        """Format for CLI display (simple numbered-text layout)."""
        parts = [
            f"\n[{question_number}/{total_questions}] {category_name}",
            "-" * 50,
            question.text,
        ]
        if question.options:
            parts.extend(
                f"  {idx}. {choice.label}"
                for idx, choice in enumerate(question.options, 1)
            )
        if question.help_text:
            parts.append(f"\nHint: {question.help_text}")
        parts.append("")
        return "\n".join(parts)

    def parse_response(self, response: str, question: Question) -> Any:
        """Parse CLI response (delegate to Claude parser)."""
        return ClaudePresenter().parse_response(response, question)

    def show_summary(self, blueprint: "StudyBlueprint") -> str:
        """Format summary for CLI (reuses the markdown summary)."""
        return ClaudePresenter().show_summary(blueprint)

    def show_warning(self, warning: str, severity: str = "warning") -> str:
        """Format warning for CLI with a bracketed severity tag."""
        tags = {"error": "[ERROR]", "warning": "[WARN]", "info": "[INFO]"}
        return f"\n{tags.get(severity, '[WARN]')} {warning}\n"

    def show_progress(self, current: int, total: int, phase: str) -> str:
        """Format progress for CLI as a simple counter."""
        return f"Progress: {current}/{total} ({phase})"
|
||||||
|
|
||||||
|
|
||||||
|
# Import for type hints
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .study_blueprint import StudyBlueprint
|
||||||
556
optimization_engine/interview/interview_state.py
Normal file
556
optimization_engine/interview/interview_state.py
Normal file
@@ -0,0 +1,556 @@
|
|||||||
|
"""
|
||||||
|
Interview State Management
|
||||||
|
|
||||||
|
This module handles the persistence and management of interview state across sessions.
|
||||||
|
It provides:
|
||||||
|
- InterviewState: Complete state dataclass
|
||||||
|
- InterviewPhase: Enum for interview phases
|
||||||
|
- InterviewStateManager: Save/load/history functionality
|
||||||
|
- LogEntry: Audit log entries
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
from datetime import datetime
|
||||||
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Any, Optional, Literal
|
||||||
|
import json
|
||||||
|
import uuid
|
||||||
|
import shutil
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
class InterviewPhase(Enum):
    """Interview phases in order of progression (declaration order matters)."""
    INTROSPECTION = "introspection"
    PROBLEM_DEFINITION = "problem_definition"
    OBJECTIVES = "objectives"
    CONSTRAINTS = "constraints"
    DESIGN_VARIABLES = "design_variables"
    VALIDATION = "validation"
    REVIEW = "review"
    COMPLETE = "complete"

    @classmethod
    def from_string(cls, s: str) -> "InterviewPhase":
        """Convert a phase value string to its enum member.

        Raises:
            ValueError: If ``s`` is not a known phase value.
        """
        try:
            # Enum lookup-by-value replaces the original manual scan.
            return cls(s)
        except ValueError:
            # Re-raise with the original, caller-facing message.
            raise ValueError(f"Unknown phase: {s}") from None

    def next_phase(self) -> Optional["InterviewPhase"]:
        """Get the next phase in declaration order, or None at the end."""
        phases = list(InterviewPhase)
        idx = phases.index(self)
        return phases[idx + 1] if idx < len(phases) - 1 else None

    def previous_phase(self) -> Optional["InterviewPhase"]:
        """Get the previous phase in declaration order, or None at the start."""
        phases = list(InterviewPhase)
        idx = phases.index(self)
        return phases[idx - 1] if idx > 0 else None
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AnsweredQuestion:
    """Record of an answered question."""
    question_id: str
    answered_at: str  # ISO datetime string
    raw_response: str
    parsed_value: Any
    inferred: Optional[Dict[str, Any]] = None  # what was inferred from the answer

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a plain dictionary (insertion order matches fields)."""
        keys = ("question_id", "answered_at", "raw_response", "parsed_value", "inferred")
        return {k: getattr(self, k) for k in keys}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "AnsweredQuestion":
        """Create from a dictionary; only 'inferred' is optional."""
        return cls(
            data["question_id"],
            data["answered_at"],
            data["raw_response"],
            data["parsed_value"],
            inferred=data.get("inferred"),
        )
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class LogEntry:
    """Entry for the human-readable audit log."""
    timestamp: datetime
    question_id: str
    question_text: str
    answer_raw: str
    answer_parsed: Any
    inferred: Optional[Dict[str, Any]] = None
    warnings: Optional[List[str]] = None

    def to_markdown(self) -> str:
        """Format as a markdown section for the audit log."""
        stamp = self.timestamp.strftime('%Y-%m-%d %H:%M:%S')
        out = [
            f"## [{stamp}] Question: {self.question_id}",
            "",
            f"**Question**: {self.question_text}",
            "",
            f"**Answer**: {self.answer_raw}",
            "",
        ]

        # Only show the parsed form when it differs from the raw answer.
        if self.answer_parsed != self.answer_raw:
            out.append(f"**Parsed Value**: `{self.answer_parsed}`")
            out.append("")

        if self.inferred:
            out.append("**Inferred**:")
            out.extend(f"- {key}: {value}" for key, value in self.inferred.items())
            out.append("")

        if self.warnings:
            out.append("**Warnings**:")
            out.extend(f"- {warning}" for warning in self.warnings)
            out.append("")

        out += ["---", ""]
        return "\n".join(out)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class InterviewState:
    """
    Complete interview state (JSON-serializable).

    This dataclass holds all state needed to resume an interview,
    including introspection results, answers, and derived configuration.
    """
    version: str = "1.0"
    session_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    study_name: str = ""
    study_path: str = ""
    parent_study: Optional[str] = None

    # Progress tracking
    started_at: str = field(default_factory=lambda: datetime.now().isoformat())
    last_updated: str = field(default_factory=lambda: datetime.now().isoformat())
    current_phase: str = InterviewPhase.INTROSPECTION.value
    complexity: Literal["simple", "moderate", "complex"] = "simple"

    # Question tracking
    questions_answered: List[Dict[str, Any]] = field(default_factory=list)
    questions_remaining: List[str] = field(default_factory=list)
    current_question_id: Optional[str] = None

    # Introspection cache
    introspection: Dict[str, Any] = field(default_factory=dict)

    # Collected answers (organized by category)
    answers: Dict[str, Any] = field(default_factory=lambda: {
        "problem_description": None,
        "physical_context": None,
        "analysis_types": [],
        "objectives": [],
        "constraints": [],
        "design_variables": [],
        "protocol": None,
        "n_trials": 100,
        "use_neural_acceleration": False,
    })

    # Derived/inferred configuration
    inferred_config: Dict[str, Any] = field(default_factory=dict)

    # Validation results
    warnings: List[str] = field(default_factory=list)
    warnings_acknowledged: List[str] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)

    # Blueprint (when complete)
    blueprint: Optional[Dict[str, Any]] = None

    def get_phase(self) -> InterviewPhase:
        """Get the current phase as an enum member."""
        return InterviewPhase.from_string(self.current_phase)

    def set_phase(self, phase: InterviewPhase) -> None:
        """Set the current phase and refresh the timestamp."""
        self.current_phase = phase.value
        self.touch()

    def touch(self) -> None:
        """Update the last_updated timestamp to now."""
        self.last_updated = datetime.now().isoformat()

    def is_complete(self) -> bool:
        """Check if the interview has reached the COMPLETE phase."""
        # String comparison: an invalid phase string yields False, not an error.
        return self.current_phase == InterviewPhase.COMPLETE.value

    def current_question_count(self) -> int:
        """Get the number of questions answered so far."""
        return len(self.questions_answered)

    def progress_percentage(self) -> float:
        """
        Estimate progress through the interview.

        Based on phase, not questions, since questions are adaptive.
        """
        ordered = list(InterviewPhase)
        position = ordered.index(self.get_phase())
        return (position / (len(ordered) - 1)) * 100

    def add_answered_question(self, question: AnsweredQuestion) -> None:
        """Record a question as answered and drop it from the remaining queue."""
        self.questions_answered.append(question.to_dict())
        try:
            self.questions_remaining.remove(question.question_id)
        except ValueError:
            pass  # not queued; nothing to drop
        self.touch()

    def get_answer(self, key: str, default: Any = None) -> Any:
        """Get an answer by key, with an optional default."""
        return self.answers.get(key, default)

    def set_answer(self, key: str, value: Any) -> None:
        """Store an answer and refresh the timestamp."""
        self.answers[key] = value
        self.touch()

    def add_warning(self, warning: str) -> None:
        """Add a warning message (deduplicated)."""
        if warning not in self.warnings:
            self.warnings.append(warning)
            self.touch()

    def acknowledge_warning(self, warning: str) -> None:
        """Mark a known warning as acknowledged (idempotent)."""
        if warning in self.warnings and warning not in self.warnings_acknowledged:
            self.warnings_acknowledged.append(warning)
            self.touch()

    def has_unacknowledged_errors(self) -> bool:
        """Check if there are blocking errors."""
        return bool(self.errors)

    def has_unacknowledged_warnings(self) -> bool:
        """Check if any warning has not yet been acknowledged."""
        acked = set(self.warnings_acknowledged)
        return any(w not in acked for w in self.warnings)

    def to_json(self) -> str:
        """Serialize to a JSON string (non-JSON values fall back to str())."""
        return json.dumps(asdict(self), indent=2, default=str)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a plain dictionary."""
        return asdict(self)

    @classmethod
    def from_json(cls, json_str: str) -> "InterviewState":
        """Deserialize from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "InterviewState":
        """Create from a dictionary, filling defaults for missing keys."""
        return cls(
            version=data.get("version", "1.0"),
            session_id=data.get("session_id", str(uuid.uuid4())),
            study_name=data.get("study_name", ""),
            study_path=data.get("study_path", ""),
            parent_study=data.get("parent_study"),
            started_at=data.get("started_at", datetime.now().isoformat()),
            last_updated=data.get("last_updated", datetime.now().isoformat()),
            current_phase=data.get("current_phase", InterviewPhase.INTROSPECTION.value),
            complexity=data.get("complexity", "simple"),
            questions_answered=data.get("questions_answered", []),
            questions_remaining=data.get("questions_remaining", []),
            current_question_id=data.get("current_question_id"),
            introspection=data.get("introspection", {}),
            answers=data.get("answers", {}),
            inferred_config=data.get("inferred_config", {}),
            warnings=data.get("warnings", []),
            warnings_acknowledged=data.get("warnings_acknowledged", []),
            errors=data.get("errors", []),
            blueprint=data.get("blueprint"),
        )

    def validate(self) -> List[str]:
        """Validate the state; return a list of error strings (empty if valid)."""
        problems: List[str] = []

        if not self.session_id:
            problems.append("Missing session_id")
        if not self.study_name:
            problems.append("Missing study_name")

        try:
            InterviewPhase.from_string(self.current_phase)
        except ValueError:
            problems.append(f"Invalid current_phase: {self.current_phase}")

        if self.complexity not in ("simple", "moderate", "complex"):
            problems.append(f"Invalid complexity: {self.complexity}")

        return problems
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class StateSnapshot:
    """Snapshot of state for history/undo."""
    timestamp: str  # "last_updated" value recorded in the backup ("unknown" if absent)
    phase: str  # interview phase recorded in the backup ("unknown" if absent)
    questions_count: int  # number of questions answered at snapshot time
    state_hash: str  # content hash of the backup file, used to compare states
    file_path: str  # path to the backup file this snapshot was read from
|
||||||
|
|
||||||
|
|
||||||
|
class InterviewStateManager:
    """
    Manages interview state persistence.

    Handles:
    - Save/load state to JSON
    - Human-readable audit log (MD)
    - State backup rotation
    - History for undo/branch
    """

    # Number of rotated backup files retained in the backups directory.
    MAX_BACKUPS = 5

    def __init__(self, study_path: Path):
        """
        Initialize state manager.

        Args:
            study_path: Path to the study directory
        """
        self.study_path = Path(study_path)
        self.interview_dir = self.study_path / ".interview"
        self.state_file = self.interview_dir / "interview_state.json"
        self.log_file = self.interview_dir / "INTERVIEW_LOG.md"
        self.backup_dir = self.interview_dir / "backups"
        self.lock_file = self.interview_dir / ".lock"

        # Ensure directories exist
        self._ensure_directories()

    def _ensure_directories(self) -> None:
        """Create necessary directories if they don't exist."""
        self.interview_dir.mkdir(parents=True, exist_ok=True)
        self.backup_dir.mkdir(exist_ok=True)

    def _acquire_lock(self) -> bool:
        """Acquire the lock file for concurrent-access prevention.

        Returns True on success. A lock older than 5 minutes is treated as
        stale (e.g. a crashed process) and replaced.
        """
        try:
            if self.lock_file.exists():
                # Check if lock is stale (older than 5 minutes)
                age = datetime.now().timestamp() - self.lock_file.stat().st_mtime
                if age > 300:  # 5 minutes
                    self.lock_file.unlink()
                else:
                    return False

            self.lock_file.write_text(str(os.getpid()))
            return True
        except Exception:
            # Best-effort locking: any filesystem error means "not acquired".
            return False

    def _release_lock(self) -> None:
        """Release the lock file (best effort; errors are ignored)."""
        try:
            if self.lock_file.exists():
                self.lock_file.unlink()
        except Exception:
            pass

    def exists(self) -> bool:
        """Check if a saved state exists."""
        return self.state_file.exists()

    def save_state(self, state: InterviewState) -> None:
        """
        Persist current state to JSON.

        Performs an atomic write (temp file + rename) with backup rotation.

        Raises:
            RuntimeError: If the lock file could not be acquired.
        """
        if not self._acquire_lock():
            raise RuntimeError("Could not acquire lock for state file")

        try:
            # Update timestamp
            state.touch()

            # Back up the current file before overwriting it
            if self.state_file.exists():
                self._rotate_backups()
                backup_name = f"state_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
                shutil.copy(self.state_file, self.backup_dir / backup_name)

            # Atomic write: write to temp file then rename
            temp_file = self.state_file.with_suffix(".tmp")
            temp_file.write_text(state.to_json(), encoding="utf-8")
            temp_file.replace(self.state_file)

        finally:
            self._release_lock()

    def _rotate_backups(self) -> None:
        """Keep only the MAX_BACKUPS most recent backup files."""
        backups = sorted(
            self.backup_dir.glob("state_*.json"),
            key=lambda p: p.stat().st_mtime,
            reverse=True
        )

        # Remove old backups
        for stale in backups[self.MAX_BACKUPS:]:
            stale.unlink()

    def load_state(self) -> Optional[InterviewState]:
        """
        Load existing state if available.

        Returns:
            InterviewState if exists and valid, None otherwise
        """
        if not self.state_file.exists():
            return None

        try:
            json_str = self.state_file.read_text(encoding="utf-8")
            state = InterviewState.from_json(json_str)

            # Validate state before handing it back
            errors = state.validate()
            if errors:
                raise ValueError(f"Invalid state: {errors}")

            return state

        except (json.JSONDecodeError, ValueError) as e:
            # Log error but don't crash - caller treats None as "no resumable state"
            print(f"Warning: Could not load interview state: {e}")
            return None

    def append_log(self, entry: LogEntry) -> None:
        """
        Add entry to human-readable audit log.

        Creates log file with header if it doesn't exist.
        """
        # Initialize log file if needed
        if not self.log_file.exists():
            self.log_file.write_text(self._create_log_header(), encoding="utf-8")

        # Append entry
        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(entry.to_markdown())

    def _create_log_header(self) -> str:
        """Create the header written to a brand-new log file."""
        return f"""# Interview Log

**Study**: {self.study_path.name}
**Started**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

This log records all questions and answers from the study interview process.

---

"""

    def finalize_log(self, state: InterviewState) -> None:
        """Add a final summary to the log when the interview completes."""
        summary = f"""
## Interview Complete

**Completed**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
**Questions Answered**: {len(state.questions_answered)}
**Complexity**: {state.complexity}

### Summary

- **Problem**: {state.answers.get('problem_description', 'N/A')}
- **Objectives**: {len(state.answers.get('objectives', []))}
- **Constraints**: {len(state.answers.get('constraints', []))}
- **Design Variables**: {len(state.answers.get('design_variables', []))}

### Warnings Acknowledged

"""
        for warning in state.warnings_acknowledged:
            summary += f"- {warning}\n"

        if not state.warnings_acknowledged:
            summary += "- None\n"

        summary += "\n---\n"

        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(summary)

    def get_history(self) -> List[StateSnapshot]:
        """
        Get modification history for undo/branch.

        Returns a list of state snapshots from backups (oldest first).
        Unreadable or corrupt backups are skipped.
        """
        import hashlib  # local import: only needed for snapshot hashing

        snapshots: List[StateSnapshot] = []

        for backup in sorted(self.backup_dir.glob("state_*.json")):
            try:
                # Read once; the same text feeds both the JSON parse and the
                # hash (previously the file was read twice per backup).
                text = backup.read_text(encoding="utf-8")
                data = json.loads(text)
                snapshots.append(StateSnapshot(
                    timestamp=data.get("last_updated", "unknown"),
                    phase=data.get("current_phase", "unknown"),
                    questions_count=len(data.get("questions_answered", [])),
                    # sha256 is stable across interpreter runs, unlike the
                    # built-in hash(), which is salted per process and made
                    # snapshot hashes incomparable across sessions.
                    state_hash=hashlib.sha256(text.encode("utf-8")).hexdigest(),
                    file_path=str(backup),
                ))
            except Exception:
                continue

        return snapshots

    def restore_from_backup(self, backup_path: str) -> Optional[InterviewState]:
        """Restore state from a backup file; returns None on any failure."""
        backup = Path(backup_path)
        if not backup.exists():
            return None

        try:
            json_str = backup.read_text(encoding="utf-8")
            return InterviewState.from_json(json_str)
        except Exception:
            # Best effort: an unreadable/corrupt backup yields None.
            return None

    def delete_state(self) -> None:
        """Delete all interview state (for restart); the audit log is kept."""
        if self.state_file.exists():
            self.state_file.unlink()

        # Keep log file but add note so the reset is auditable
        if self.log_file.exists():
            with open(self.log_file, "a", encoding="utf-8") as f:
                f.write(f"\n## State Reset\n\n**Reset at**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n---\n\n")
|
||||||
747
optimization_engine/interview/question_engine.py
Normal file
747
optimization_engine/interview/question_engine.py
Normal file
@@ -0,0 +1,747 @@
|
|||||||
|
"""
|
||||||
|
Question Engine
|
||||||
|
|
||||||
|
This module manages question definitions, conditions, and dynamic options.
|
||||||
|
It handles:
|
||||||
|
- Loading question schemas from JSON
|
||||||
|
- Evaluating conditional logic
|
||||||
|
- Populating dynamic options from introspection
|
||||||
|
- Question ordering and flow control
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Any, Optional, Literal, Union
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ValidationRule:
    """Validation rule for a question answer."""
    required: bool = False
    min_length: Optional[int] = None
    max_length: Optional[int] = None
    min: Optional[float] = None
    max: Optional[float] = None
    min_selections: Optional[int] = None
    max_selections: Optional[int] = None
    pattern: Optional[str] = None
    units: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Optional[Dict[str, Any]]) -> Optional["ValidationRule"]:
        """Create from a dictionary; a None input yields None (no rule)."""
        if data is None:
            return None
        kwargs: Dict[str, Any] = {"required": data.get("required", False)}
        # All remaining fields default to None when absent.
        for name in ("min_length", "max_length", "min", "max",
                     "min_selections", "max_selections", "pattern", "units"):
            kwargs[name] = data.get(name)
        return cls(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class QuestionOption:
    """Option for choice/multi_choice questions."""
    value: Any
    label: str
    description: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "QuestionOption":
        """Create from a dictionary ('value' and 'label' are required)."""
        return cls(data["value"], data["label"], data.get("description"))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class QuestionCondition:
    """
    Conditional logic for when to ask a question.

    Supports:
    - answered: field has been answered
    - equals: field equals value
    - contains: array field contains value
    - greater_than: numeric comparison
    - less_than: numeric comparison
    - exists: field exists and is not None
    - introspection_has: introspection data has field
    - complexity_is: complexity level matches
    - and/or/not: logical operators
    """
    type: str
    field: Optional[str] = None
    value: Optional[Any] = None
    condition: Optional["QuestionCondition"] = None  # For 'not'
    conditions: Optional[List["QuestionCondition"]] = None  # For 'and'/'or'

    @classmethod
    def from_dict(cls, data: Optional[Dict[str, Any]]) -> Optional["QuestionCondition"]:
        """Recursively build a condition tree from a dictionary (None -> None)."""
        if data is None:
            return None

        node = cls(
            type=data["type"],
            field=data.get("field"),
            value=data.get("value"),
        )

        # Nested 'not' condition
        if "condition" in data:
            node.condition = cls.from_dict(data["condition"])

        # Nested 'and'/'or' conditions
        if "conditions" in data:
            node.conditions = [cls.from_dict(child) for child in data["conditions"]]

        return node
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class DynamicOptions:
    """Configuration for dynamic option population."""
    type: str
    source: str
    filter: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Optional[Dict[str, Any]]) -> Optional["DynamicOptions"]:
        """Create from a dictionary; a None input yields None."""
        if data is None:
            return None
        return cls(data["type"], data["source"], data.get("filter"))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class DynamicContent:
    """Configuration for dynamic content in question text."""
    type: str
    source: str

    @classmethod
    def from_dict(cls, data: Optional[Dict[str, Any]]) -> Optional["DynamicContent"]:
        """Create from a dictionary; a None input yields None."""
        if data is None:
            return None
        return cls(data["type"], data["source"])
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Question:
    """Represents a single interview question."""
    id: str
    category: str
    text: str
    question_type: Literal["text", "choice", "multi_choice", "numeric", "confirm", "parameter_select", "bounds"]
    maps_to: str
    help_text: Optional[str] = None
    options: Optional[List[QuestionOption]] = None
    default: Optional[Any] = None
    validation: Optional[ValidationRule] = None
    condition: Optional[QuestionCondition] = None
    engineering_guidance: Optional[str] = None
    dynamic_options: Optional[DynamicOptions] = None
    dynamic_content: Optional[DynamicContent] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Question":
        """Build a Question, delegating nested structures to their own parsers."""
        raw_options = data.get("options")
        parsed_options = (
            [QuestionOption.from_dict(o) for o in raw_options] if raw_options else None
        )

        return cls(
            id=data["id"],
            category=data["category"],
            text=data["text"],
            question_type=data["question_type"],
            maps_to=data["maps_to"],
            help_text=data.get("help_text"),
            options=parsed_options,
            default=data.get("default"),
            validation=ValidationRule.from_dict(data.get("validation")),
            condition=QuestionCondition.from_dict(data.get("condition")),
            engineering_guidance=data.get("engineering_guidance"),
            dynamic_options=DynamicOptions.from_dict(data.get("dynamic_options")),
            dynamic_content=DynamicContent.from_dict(data.get("dynamic_content")),
        )
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class QuestionCategory:
    """A named, ordered group of related interview questions."""
    id: str
    name: str
    phase: str
    order: int
    always_ask: bool = True
    condition: Optional[QuestionCondition] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "QuestionCategory":
        """Deserialize a category record from the schema dict."""
        return cls(
            data["id"],
            data["name"],
            data["phase"],
            data["order"],
            data.get("always_ask", True),
            QuestionCondition.from_dict(data.get("condition")),
        )
|
||||||
|
|
||||||
|
|
||||||
|
class QuestionEngine:
    """
    Manages question definitions and flow logic.

    Handles:
    - Loading questions from JSON schema
    - Evaluating conditions to determine next question
    - Populating dynamic options from introspection
    - Answer parsing and validation
    """

    def __init__(self, schema_path: Optional[Path] = None):
        """
        Initialize question engine.

        Args:
            schema_path: Path to question schema JSON. If None, uses the
                default ``schemas/interview_questions.json`` next to this module.

        Raises:
            FileNotFoundError: If the schema file does not exist (via _load_schema).
        """
        if schema_path is None:
            schema_path = Path(__file__).parent / "schemas" / "interview_questions.json"

        self.schema_path = schema_path
        # Raw parsed JSON schema document.
        self.schema: Dict[str, Any] = {}
        # Categories, sorted ascending by their 'order' field after loading.
        self.categories: List[QuestionCategory] = []
        # question id -> Question
        self.questions: Dict[str, Question] = {}
        # category id -> questions, preserving schema declaration order
        self.questions_by_category: Dict[str, List[Question]] = {}

        self._load_schema()

    def _load_schema(self) -> None:
        """Load question schema from JSON file and index categories/questions."""
        if not self.schema_path.exists():
            raise FileNotFoundError(f"Question schema not found: {self.schema_path}")

        with open(self.schema_path, "r", encoding="utf-8") as f:
            self.schema = json.load(f)

        # Parse categories and sort by their declared 'order'.
        self.categories = [
            QuestionCategory.from_dict(c) for c in self.schema.get("categories", [])
        ]
        self.categories.sort(key=lambda c: c.order)

        # Parse questions and index them by id.
        for q_data in self.schema.get("questions", []):
            question = Question.from_dict(q_data)
            self.questions[question.id] = question

            # Organize by category (schema declaration order preserved per category).
            if question.category not in self.questions_by_category:
                self.questions_by_category[question.category] = []
            self.questions_by_category[question.category].append(question)

    def get_all_questions(self) -> List[Question]:
        """Get all questions, grouped by category in category order."""
        result = []
        for category in self.categories:
            if category.id in self.questions_by_category:
                result.extend(self.questions_by_category[category.id])
        return result

    def get_question(self, question_id: str) -> Optional[Question]:
        """Get a specific question by ID, or None if unknown."""
        return self.questions.get(question_id)

    def get_next_question(
        self,
        state: "InterviewState",
        introspection: Dict[str, Any]
    ) -> Optional[Question]:
        """
        Determine the next question based on state and conditions.

        Args:
            state: Current interview state
            introspection: Introspection results from model

        Returns:
            Next question to ask, or None if interview is complete
        """
        # Each entry in state.questions_answered carries a "question_id" key.
        answered_ids = {q["question_id"] for q in state.questions_answered}

        # Go through categories in order
        for category in self.categories:
            # Check if category should be asked
            if not self._should_ask_category(category, state, introspection):
                continue

            # Get questions in this category
            category_questions = self.questions_by_category.get(category.id, [])

            for question in category_questions:
                # Skip if already answered
                if question.id in answered_ids:
                    continue

                # Check if question condition is met
                if self._should_ask_question(question, state, introspection):
                    # Populate dynamic options if needed
                    return self._prepare_question(question, state, introspection)

        # No more questions
        return None

    def _should_ask_category(
        self,
        category: QuestionCategory,
        state: "InterviewState",
        introspection: Dict[str, Any]
    ) -> bool:
        """Check if a category should be asked.

        always_ask wins; otherwise the category's condition decides, and a
        category with neither defaults to being asked.
        """
        if category.always_ask:
            return True

        if category.condition:
            return self.evaluate_condition(category.condition, state, introspection)

        return True

    def _should_ask_question(
        self,
        question: Question,
        state: "InterviewState",
        introspection: Dict[str, Any]
    ) -> bool:
        """Check if a question should be asked (unconditional questions always are)."""
        if question.condition is None:
            return True

        return self.evaluate_condition(question.condition, state, introspection)

    def evaluate_condition(
        self,
        condition: QuestionCondition,
        state: "InterviewState",
        introspection: Dict[str, Any]
    ) -> bool:
        """
        Evaluate if a condition is met.

        Args:
            condition: Condition to evaluate
            state: Current interview state
            introspection: Introspection results

        Returns:
            True if condition is met

        Note:
            Unknown condition types evaluate to True (permissive by design —
            see the final else branch). 'answered' and 'exists' are currently
            implemented identically: both test the field for non-None.
        """
        cond_type = condition.type

        if cond_type == "answered":
            return self._get_nested_value(state.answers, condition.field) is not None

        elif cond_type == "equals":
            actual = self._get_nested_value(state.answers, condition.field)
            return actual == condition.value

        elif cond_type == "contains":
            # Only list membership is supported; any non-list value fails.
            actual = self._get_nested_value(state.answers, condition.field)
            if isinstance(actual, list):
                return condition.value in actual
            return False

        elif cond_type == "greater_than":
            actual = self._get_nested_value(state.answers, condition.field)
            if actual is not None and isinstance(actual, (int, float)):
                return actual > condition.value
            return False

        elif cond_type == "less_than":
            actual = self._get_nested_value(state.answers, condition.field)
            if actual is not None and isinstance(actual, (int, float)):
                return actual < condition.value
            return False

        elif cond_type == "exists":
            actual = self._get_nested_value(state.answers, condition.field)
            return actual is not None

        elif cond_type == "introspection_has":
            # Checks key presence in the introspection dict, not the answers.
            return condition.field in introspection

        elif cond_type == "complexity_is":
            # value may be a single complexity label or a list of accepted labels.
            expected = condition.value
            if isinstance(expected, list):
                return state.complexity in expected
            return state.complexity == expected

        elif cond_type == "and":
            # Vacuously true when no sub-conditions are present.
            if condition.conditions:
                return all(
                    self.evaluate_condition(c, state, introspection)
                    for c in condition.conditions
                )
            return True

        elif cond_type == "or":
            # Vacuously false when no sub-conditions are present.
            if condition.conditions:
                return any(
                    self.evaluate_condition(c, state, introspection)
                    for c in condition.conditions
                )
            return False

        elif cond_type == "not":
            # A 'not' with no nested condition evaluates to True.
            if condition.condition:
                return not self.evaluate_condition(condition.condition, state, introspection)
            return True

        else:
            # Unknown condition type
            return True

    def _get_nested_value(self, data: Dict[str, Any], path: str) -> Any:
        """
        Get a value from nested dict using dot notation.

        Supports array indexing: "objectives[0].goal"

        Returns None for an empty path, a missing key, an out-of-range or
        non-integer index, or when traversal hits a non-dict/non-list value.
        """
        if not path:
            return None

        # Split on '.', '[' and ']' so "a.b[0]" -> ["a", "b", "0"].
        parts = re.split(r'\.|\[|\]', path)
        parts = [p for p in parts if p]  # Remove empty strings

        current = data
        for part in parts:
            if current is None:
                return None

            if isinstance(current, dict):
                current = current.get(part)
            elif isinstance(current, list):
                try:
                    idx = int(part)
                    if 0 <= idx < len(current):
                        current = current[idx]
                    else:
                        return None
                except ValueError:
                    return None
            else:
                return None

        return current

    def _prepare_question(
        self,
        question: Question,
        state: "InterviewState",
        introspection: Dict[str, Any]
    ) -> Question:
        """
        Prepare a question for presentation.

        Populates dynamic options and content.

        Returns a deep copy so the schema-loaded Question objects are never
        mutated between interviews.
        """
        # Create a copy to avoid mutating the original
        import copy
        prepared = copy.deepcopy(question)

        # Populate dynamic options
        if prepared.dynamic_options:
            prepared.options = self._populate_dynamic_options(
                prepared.dynamic_options, state, introspection
            )

        return prepared

    def _populate_dynamic_options(
        self,
        dynamic: DynamicOptions,
        state: "InterviewState",
        introspection: Dict[str, Any]
    ) -> List[QuestionOption]:
        """Populate dynamic options from introspection data.

        Only the "expressions" dynamic type is implemented; any other type
        yields an empty option list.
        """
        options = []

        if dynamic.type == "expressions":
            # Get expressions from introspection
            expressions = introspection.get("expressions", [])

            # Apply filter if specified
            if dynamic.filter == "design_variable_heuristics":
                expressions = self._filter_design_variables(expressions)
            elif dynamic.filter == "exclude_selected_dvs":
                # Drop expressions the user already picked as design variables.
                selected = [dv.get("parameter") for dv in state.answers.get("design_variables", [])]
                expressions = [e for e in expressions if e.get("name") not in selected]

            # Convert to options
            for expr in expressions:
                name = expr.get("name", "")
                value = expr.get("value", 0)
                options.append(QuestionOption(
                    value=name,
                    label=f"{name} (current: {value})",
                    # Show the formula only when it differs from the plain value.
                    description=expr.get("formula") if expr.get("formula") != str(value) else None,
                ))

        return options

    def _filter_design_variables(self, expressions: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Filter expressions to likely design variables using heuristics.

        Scores each expression name against regex pattern lists (high=2,
        medium=1), drops excluded/formula/non-numeric entries, and returns
        candidates sorted by descending score.
        """
        # High confidence patterns
        high_patterns = [
            r"thickness", r"width", r"height", r"diameter", r"radius",
            r"length", r"depth", r"angle", r"fillet", r"chamfer",
            r"rib_\w+", r"wall_\w+", r"flange_\w+"
        ]

        # Medium confidence patterns
        medium_patterns = [
            r"dim_\w+", r"size_\w+", r"param_\w+", r"p\d+", r"var_\w+"
        ]

        # Exclusion patterns
        exclude_patterns = [
            r"mesh_\w+", r"count_\w+", r"num_\w+", r"material\w*",
            r"derived_\w+", r"calc_\w+", r"_result$", r"_output$"
        ]

        def matches_any(name: str, patterns: List[str]) -> bool:
            # Case-insensitive substring-style search against each pattern.
            return any(re.search(p, name.lower()) for p in patterns)

        # Score and filter
        scored = []
        for expr in expressions:
            name = expr.get("name", "")

            # Skip exclusions
            if matches_any(name, exclude_patterns):
                continue

            # Skip if not a simple numeric value
            value = expr.get("value")
            if not isinstance(value, (int, float)):
                continue

            # Skip if it's a formula (computed value)
            formula = expr.get("formula", "")
            if formula and formula != str(value):
                continue

            # Score
            score = 0
            if matches_any(name, high_patterns):
                score = 2
            elif matches_any(name, medium_patterns):
                score = 1

            if score > 0 or len(name) > 2:  # Include if named or matches pattern
                scored.append((score, expr))

        # Sort by score descending
        scored.sort(key=lambda x: -x[0])

        return [expr for _, expr in scored]

    def validate_answer(
        self,
        answer: Any,
        question: Question
    ) -> tuple[bool, Optional[str]]:
        """
        Validate an answer against question rules.

        Returns:
            Tuple of (is_valid, error_message)

        Note:
            Questions without a validation rule always pass; empty optional
            answers skip all further checks.
        """
        if question.validation is None:
            return True, None

        validation = question.validation

        # Required check
        if validation.required:
            if answer is None or answer == "" or answer == []:
                return False, "This field is required"

        # Skip further validation if empty and not required
        if answer is None or answer == "":
            return True, None

        # Text length validation
        if question.question_type == "text":
            if validation.min_length and len(str(answer)) < validation.min_length:
                return False, f"Answer must be at least {validation.min_length} characters"
            if validation.max_length and len(str(answer)) > validation.max_length:
                return False, f"Answer must be at most {validation.max_length} characters"

        # Numeric validation
        if question.question_type == "numeric":
            try:
                num = float(answer)
                if validation.min is not None and num < validation.min:
                    return False, f"Value must be at least {validation.min}"
                if validation.max is not None and num > validation.max:
                    return False, f"Value must be at most {validation.max}"
            except (ValueError, TypeError):
                return False, "Please enter a valid number"

        # Multi-choice validation
        if question.question_type in ["multi_choice", "parameter_select"]:
            if isinstance(answer, list):
                if validation.min_selections and len(answer) < validation.min_selections:
                    return False, f"Please select at least {validation.min_selections} option(s)"
                if validation.max_selections and len(answer) > validation.max_selections:
                    return False, f"Please select at most {validation.max_selections} option(s)"

        # Pattern validation
        if validation.pattern:
            # re.match anchors at the start of the string only.
            if not re.match(validation.pattern, str(answer)):
                return False, "Answer does not match required format"

        return True, None

    def parse_answer(
        self,
        raw_answer: str,
        question: Question
    ) -> Any:
        """
        Parse a raw answer string into the appropriate type.

        Args:
            raw_answer: Raw string answer from user
            question: Question being answered

        Returns:
            Parsed answer value. Depending on question type this may be a
            str, float, bool, list, dict of bounds, or None when parsing
            fails (numeric/confirm/bounds).
        """
        answer = raw_answer.strip()

        if question.question_type == "text":
            return answer

        elif question.question_type == "numeric":
            # Extract number, handling units (e.g. "12.5 mm" -> 12.5).
            match = re.search(r"[-+]?\d*\.?\d+", answer)
            if match:
                return float(match.group())
            return None

        elif question.question_type == "confirm":
            # Unrecognized replies return None rather than guessing.
            lower = answer.lower()
            if lower in ["yes", "y", "true", "1", "ok", "sure", "confirm", "correct"]:
                return True
            elif lower in ["no", "n", "false", "0", "cancel", "incorrect"]:
                return False
            return None

        elif question.question_type == "choice":
            # Try matching by number (1-based menu index)
            if answer.isdigit():
                idx = int(answer) - 1
                if question.options and 0 <= idx < len(question.options):
                    return question.options[idx].value

            # Try matching by value or label
            if question.options:
                for opt in question.options:
                    if answer.lower() == str(opt.value).lower():
                        return opt.value
                    if answer.lower() == opt.label.lower():
                        return opt.value
                    # Fuzzy match: answer as a substring of the label
                    if answer.lower() in opt.label.lower():
                        return opt.value

            # Fall through: return the raw text unmatched.
            return answer

        elif question.question_type == "multi_choice":
            # Parse comma/and separated values
            parts = re.split(r"[,&]|\band\b", answer)
            values = []

            for part in parts:
                part = part.strip()
                if not part:
                    continue

                # Try matching by number (1-based menu index)
                if part.isdigit():
                    idx = int(part) - 1
                    if question.options and 0 <= idx < len(question.options):
                        values.append(question.options[idx].value)
                        continue

                # Try matching by value or label (exact, then substring)
                if question.options:
                    for opt in question.options:
                        if part.lower() == str(opt.value).lower():
                            values.append(opt.value)
                            break
                        if part.lower() == opt.label.lower():
                            values.append(opt.value)
                            break
                        if part.lower() in opt.label.lower():
                            values.append(opt.value)
                            break

            # If nothing matched, keep the raw text as a single-element list.
            return values if values else [answer]

        elif question.question_type == "parameter_select":
            # Similar to multi_choice but for parameters: return raw names.
            parts = re.split(r"[,&]|\band\b", answer)
            return [p.strip() for p in parts if p.strip()]

        elif question.question_type == "bounds":
            # Parse bounds like "2-10" or "2 to 10" or "min 2, max 10"
            bounds = {}

            # Try "min to max" format (note: does not handle negative numbers)
            match = re.search(r"(\d+\.?\d*)\s*(?:to|-)\s*(\d+\.?\d*)", answer)
            if match:
                bounds["min"] = float(match.group(1))
                bounds["max"] = float(match.group(2))
                return bounds

            # Try "min X, max Y" format (either side may be given alone)
            min_match = re.search(r"min[:\s]+(\d+\.?\d*)", answer.lower())
            max_match = re.search(r"max[:\s]+(\d+\.?\d*)", answer.lower())
            if min_match:
                bounds["min"] = float(min_match.group(1))
            if max_match:
                bounds["max"] = float(max_match.group(1))

            return bounds if bounds else None

        # Unknown question types pass the stripped text through unchanged.
        return answer
|
||||||
|
|
||||||
|
|
||||||
|
# Import InterviewState here to avoid circular imports
|
||||||
|
from .interview_state import InterviewState
|
||||||
213
optimization_engine/interview/schemas/anti_patterns.json
Normal file
213
optimization_engine/interview/schemas/anti_patterns.json
Normal file
@@ -0,0 +1,213 @@
|
|||||||
|
{
|
||||||
|
"version": "1.0",
|
||||||
|
"description": "Common optimization setup anti-patterns and their detection",
|
||||||
|
"patterns": [
|
||||||
|
{
|
||||||
|
"id": "mass_no_constraint",
|
||||||
|
"name": "Mass Minimization Without Constraints",
|
||||||
|
"description": "Minimizing mass without any structural constraints will result in zero-thickness (or zero-size) designs that are physically impossible",
|
||||||
|
"severity": "error",
|
||||||
|
"condition": {
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{
|
||||||
|
"type": "or",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "objectives", "value": "minimize_mass"},
|
||||||
|
{"type": "contains", "field": "objectives", "value": "minimize_weight"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{"type": "empty", "field": "constraints"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Add at least one constraint: maximum stress, maximum displacement, or minimum frequency",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "modal_single_solution",
|
||||||
|
"name": "Modal Analysis with Single Solution Step",
|
||||||
|
"description": "When both static and modal analysis are needed, using only a single solution may miss computing one type of result",
|
||||||
|
"severity": "error",
|
||||||
|
"condition": {
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "analysis_types", "value": "modal"},
|
||||||
|
{"type": "contains", "field": "analysis_types", "value": "static"},
|
||||||
|
{"type": "equals", "field": "solve_all_solutions", "value": false}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Enable 'solve all solutions' to ensure both static and modal results are computed",
|
||||||
|
"auto_fix": {
|
||||||
|
"field": "solve_all_solutions",
|
||||||
|
"value": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "bounds_too_wide",
|
||||||
|
"name": "Design Variable Bounds Too Wide",
|
||||||
|
"description": "When bounds span more than 10x the range (max/min > 10), optimization may struggle to converge efficiently",
|
||||||
|
"severity": "warning",
|
||||||
|
"condition": {
|
||||||
|
"type": "any_of",
|
||||||
|
"field": "design_variables",
|
||||||
|
"check": {
|
||||||
|
"type": "ratio_greater_than",
|
||||||
|
"field": ["max_value", "min_value"],
|
||||||
|
"value": 10
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Consider narrowing bounds based on engineering knowledge. Very wide bounds increase the search space exponentially.",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "stress_over_yield",
|
||||||
|
"name": "Stress Limit Exceeds Material Yield",
|
||||||
|
"description": "The specified stress constraint exceeds the material yield stress, which could allow plastic deformation",
|
||||||
|
"severity": "warning",
|
||||||
|
"condition": {
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "exists", "field": "constraints.max_stress"},
|
||||||
|
{"type": "exists", "field": "introspection.material"},
|
||||||
|
{
|
||||||
|
"type": "greater_than",
|
||||||
|
"field": "constraints.max_stress",
|
||||||
|
"compare_to": "material.yield_stress_mpa"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"fix_suggestion": "The stress limit should typically be the yield stress divided by a safety factor (1.5-2.0 for structural applications)",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "conflicting_objectives",
|
||||||
|
"name": "Typically Conflicting Objectives",
|
||||||
|
"description": "The selected objectives are typically in conflict. This is not an error, but expect a trade-off Pareto front rather than a single optimal solution.",
|
||||||
|
"severity": "info",
|
||||||
|
"condition": {
|
||||||
|
"type": "or",
|
||||||
|
"conditions": [
|
||||||
|
{
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "objectives", "value": "minimize_mass"},
|
||||||
|
{"type": "contains", "field": "objectives", "value": "minimize_displacement"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "objectives", "value": "minimize_mass"},
|
||||||
|
{"type": "contains", "field": "objectives", "value": "maximize_frequency"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Consider which objective is more important, or proceed with multi-objective optimization to explore trade-offs",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "too_many_objectives",
|
||||||
|
"name": "Too Many Objectives",
|
||||||
|
"description": "More than 3 objectives makes interpretation difficult and may not improve the optimization",
|
||||||
|
"severity": "warning",
|
||||||
|
"condition": {
|
||||||
|
"type": "count_greater_than",
|
||||||
|
"field": "objectives",
|
||||||
|
"value": 3
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Consider reducing to 2-3 primary objectives. Additional goals can often be handled as constraints.",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "missing_stress_constraint",
|
||||||
|
"name": "Missing Stress Constraint",
|
||||||
|
"description": "Static analysis without a stress constraint may result in designs that fail structurally",
|
||||||
|
"severity": "warning",
|
||||||
|
"condition": {
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "analysis_types", "value": "static"},
|
||||||
|
{"type": "not_exists", "field": "constraints.max_stress"},
|
||||||
|
{
|
||||||
|
"type": "not",
|
||||||
|
"condition": {"type": "contains", "field": "objectives", "value": "minimize_stress"}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Add a stress constraint based on material yield stress and appropriate safety factor",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "too_few_trials",
|
||||||
|
"name": "Insufficient Trials for Design Space",
|
||||||
|
"description": "The number of trials may be too low for the number of design variables to adequately explore the design space",
|
||||||
|
"severity": "warning",
|
||||||
|
"condition": {
|
||||||
|
"type": "less_than",
|
||||||
|
"field": "n_trials",
|
||||||
|
"compare_to": {
|
||||||
|
"type": "multiply",
|
||||||
|
"field": "design_variable_count",
|
||||||
|
"value": 15
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Rule of thumb: use at least 10-20 trials per design variable. Consider increasing trials.",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "infeasible_baseline",
|
||||||
|
"name": "Baseline Violates Constraints",
|
||||||
|
"description": "The nominal design already violates one or more constraints. The optimizer starts in the infeasible region.",
|
||||||
|
"severity": "warning",
|
||||||
|
"condition": {
|
||||||
|
"type": "exists",
|
||||||
|
"field": "baseline_violations"
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Consider relaxing constraints or modifying the baseline design to start from a feasible point",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "no_design_variables",
|
||||||
|
"name": "No Design Variables Selected",
|
||||||
|
"description": "At least one design variable must be selected for optimization",
|
||||||
|
"severity": "error",
|
||||||
|
"condition": {
|
||||||
|
"type": "empty",
|
||||||
|
"field": "design_variables"
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Select one or more parameters to vary during optimization",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "thermal_no_temperature",
|
||||||
|
"name": "Thermal Analysis Without Temperature Gradient",
|
||||||
|
"description": "Thermal analysis typically requires a temperature boundary condition or thermal load",
|
||||||
|
"severity": "warning",
|
||||||
|
"condition": {
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "analysis_types", "value": "thermal"},
|
||||||
|
{"type": "not_exists", "field": "introspection.thermal_bc"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"fix_suggestion": "Verify thermal boundary conditions are defined in the simulation",
|
||||||
|
"auto_fix": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "single_dv_many_trials",
|
||||||
|
"name": "Single Variable with Many Trials",
|
||||||
|
"description": "For single-variable optimization, many trials may be inefficient. Consider using gradient-based methods.",
|
||||||
|
"severity": "info",
|
||||||
|
"condition": {
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "count_equals", "field": "design_variables", "value": 1},
|
||||||
|
{"type": "greater_than", "field": "n_trials", "value": 50}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"fix_suggestion": "For single-variable problems, L-BFGS-B or golden section search may converge faster than sampling-based optimization",
|
||||||
|
"auto_fix": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
466
optimization_engine/interview/schemas/interview_questions.json
Normal file
466
optimization_engine/interview/schemas/interview_questions.json
Normal file
@@ -0,0 +1,466 @@
|
|||||||
|
{
|
||||||
|
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||||
|
"version": "1.0",
|
||||||
|
"description": "Interview questions for Atomizer study creation",
|
||||||
|
|
||||||
|
"categories": [
|
||||||
|
{
|
||||||
|
"id": "problem_definition",
|
||||||
|
"name": "Problem Definition",
|
||||||
|
"phase": "problem_definition",
|
||||||
|
"order": 1,
|
||||||
|
"always_ask": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "objectives",
|
||||||
|
"name": "Optimization Objectives",
|
||||||
|
"phase": "objectives",
|
||||||
|
"order": 2,
|
||||||
|
"always_ask": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "constraints",
|
||||||
|
"name": "Constraints & Limits",
|
||||||
|
"phase": "constraints",
|
||||||
|
"order": 3,
|
||||||
|
"always_ask": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "design_variables",
|
||||||
|
"name": "Design Variables",
|
||||||
|
"phase": "design_variables",
|
||||||
|
"order": 4,
|
||||||
|
"always_ask": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "physics_config",
|
||||||
|
"name": "Physics Configuration",
|
||||||
|
"phase": "design_variables",
|
||||||
|
"order": 5,
|
||||||
|
"condition": {
|
||||||
|
"type": "complexity_is",
|
||||||
|
"value": ["moderate", "complex"]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "optimization_settings",
|
||||||
|
"name": "Optimization Settings",
|
||||||
|
"phase": "validation",
|
||||||
|
"order": 6,
|
||||||
|
"condition": {
|
||||||
|
"type": "complexity_is",
|
||||||
|
"value": ["moderate", "complex"]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "validation",
|
||||||
|
"name": "Validation",
|
||||||
|
"phase": "validation",
|
||||||
|
"order": 7,
|
||||||
|
"always_ask": false
|
||||||
|
}
|
||||||
|
],
|
||||||
|
|
||||||
|
"questions": [
|
||||||
|
{
|
||||||
|
"id": "pd_01",
|
||||||
|
"category": "problem_definition",
|
||||||
|
"text": "What engineering problem are you trying to solve with this optimization?",
|
||||||
|
"help_text": "Describe the goal in engineering terms. For example: 'Reduce the weight of a bracket while maintaining structural integrity' or 'Tune the natural frequency to avoid resonance'.",
|
||||||
|
"question_type": "text",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": true,
|
||||||
|
"min_length": 10
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "problem_description",
|
||||||
|
"engineering_guidance": "A clear problem statement helps ensure the optimization setup matches your actual goals."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "pd_02",
|
||||||
|
"category": "problem_definition",
|
||||||
|
"text": "What is the physical context of this component?",
|
||||||
|
"help_text": "Describe how this part is used. For example: 'Mounting bracket for an aircraft wing' or 'Support structure for a telescope mirror'.",
|
||||||
|
"question_type": "text",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": false
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "complexity_is",
|
||||||
|
"value": ["moderate", "complex"]
|
||||||
|
},
|
||||||
|
"maps_to": "physical_context",
|
||||||
|
"engineering_guidance": "Understanding the physical context helps validate constraint choices."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "pd_03",
|
||||||
|
"category": "problem_definition",
|
||||||
|
"text": "What type of analysis does your model use?",
|
||||||
|
"help_text": "Select all analysis types that are set up in your simulation.",
|
||||||
|
"question_type": "multi_choice",
|
||||||
|
"options": [
|
||||||
|
{"value": "static", "label": "Static structural analysis"},
|
||||||
|
{"value": "modal", "label": "Modal/frequency analysis"},
|
||||||
|
{"value": "thermal", "label": "Thermal analysis"},
|
||||||
|
{"value": "coupled_thermal_structural", "label": "Coupled thermal-structural"},
|
||||||
|
{"value": "buckling", "label": "Buckling analysis"},
|
||||||
|
{"value": "nonlinear", "label": "Nonlinear analysis"}
|
||||||
|
],
|
||||||
|
"default": ["static"],
|
||||||
|
"validation": {
|
||||||
|
"required": true,
|
||||||
|
"min_selections": 1
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "analysis_types",
|
||||||
|
"engineering_guidance": "The analysis type determines which extractors and solution strategies are available."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "obj_01",
|
||||||
|
"category": "objectives",
|
||||||
|
"text": "What is your primary optimization goal?",
|
||||||
|
"help_text": "Choose the main thing you want to optimize for.",
|
||||||
|
"question_type": "choice",
|
||||||
|
"options": [
|
||||||
|
{"value": "minimize_mass", "label": "Minimize mass/weight"},
|
||||||
|
{"value": "minimize_displacement", "label": "Minimize displacement (maximize stiffness)"},
|
||||||
|
{"value": "maximize_frequency", "label": "Maximize natural frequency"},
|
||||||
|
{"value": "minimize_stress", "label": "Minimize peak stress"},
|
||||||
|
{"value": "target_frequency", "label": "Target a specific frequency"},
|
||||||
|
{"value": "minimize_wavefront_error", "label": "Minimize wavefront error (optical)"},
|
||||||
|
{"value": "custom", "label": "Custom objective (I'll specify)"}
|
||||||
|
],
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "objectives[0].goal",
|
||||||
|
"engineering_guidance": "Mass minimization requires at least one constraint (stress, displacement, or frequency) to avoid degenerating to zero-thickness designs."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "obj_02",
|
||||||
|
"category": "objectives",
|
||||||
|
"text": "Do you have any secondary objectives?",
|
||||||
|
"help_text": "Select additional objectives if this is a multi-objective optimization. Leave empty for single-objective.",
|
||||||
|
"question_type": "multi_choice",
|
||||||
|
"options": [
|
||||||
|
{"value": "minimize_mass", "label": "Minimize mass/weight"},
|
||||||
|
{"value": "minimize_displacement", "label": "Minimize displacement"},
|
||||||
|
{"value": "maximize_frequency", "label": "Maximize frequency"},
|
||||||
|
{"value": "minimize_stress", "label": "Minimize stress"},
|
||||||
|
{"value": "none", "label": "No secondary objectives (single-objective)"}
|
||||||
|
],
|
||||||
|
"default": ["none"],
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "objectives_secondary",
|
||||||
|
"engineering_guidance": "Multi-objective optimization produces a Pareto front of trade-off solutions. More than 3 objectives can make interpretation difficult."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "obj_03",
|
||||||
|
"category": "objectives",
|
||||||
|
"text": "I've selected the following extractors for your objectives. Does this look correct?",
|
||||||
|
"help_text": "The extractor is the code that reads the physics results from the simulation. I've automatically selected based on your goals.",
|
||||||
|
"question_type": "confirm",
|
||||||
|
"options": null,
|
||||||
|
"default": true,
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "extractors_confirmed",
|
||||||
|
"engineering_guidance": null,
|
||||||
|
"dynamic_content": {
|
||||||
|
"type": "extractor_summary",
|
||||||
|
"source": "inferred_config.extractors"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "con_01",
|
||||||
|
"category": "constraints",
|
||||||
|
"text": "What is the maximum allowable stress?",
|
||||||
|
"help_text": "Enter the stress limit in MPa. This is typically based on material yield stress with a safety factor.",
|
||||||
|
"question_type": "numeric",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": true,
|
||||||
|
"min": 1,
|
||||||
|
"max": 10000,
|
||||||
|
"units": "MPa"
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "or",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "analysis_types", "value": "static"},
|
||||||
|
{"type": "equals", "field": "objectives[0].goal", "value": "minimize_mass"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"maps_to": "constraints.max_stress",
|
||||||
|
"engineering_guidance": "For aluminum 6061-T6, yield stress is 276 MPa. A safety factor of 1.5 gives ~180 MPa limit."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "con_02",
|
||||||
|
"category": "constraints",
|
||||||
|
"text": "What is the maximum allowable displacement?",
|
||||||
|
"help_text": "Enter the displacement limit. Include units (mm or in).",
|
||||||
|
"question_type": "numeric",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": false,
|
||||||
|
"min": 0,
|
||||||
|
"units": "mm"
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "or",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "contains", "field": "analysis_types", "value": "static"},
|
||||||
|
{"type": "equals", "field": "objectives[0].goal", "value": "minimize_mass"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"maps_to": "constraints.max_displacement",
|
||||||
|
"engineering_guidance": "Displacement limits often come from functional requirements - clearance, alignment, etc."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "con_03",
|
||||||
|
"category": "constraints",
|
||||||
|
"text": "What is the minimum acceptable natural frequency?",
|
||||||
|
"help_text": "Enter the frequency limit in Hz.",
|
||||||
|
"question_type": "numeric",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": true,
|
||||||
|
"min": 0.1,
|
||||||
|
"units": "Hz"
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "contains",
|
||||||
|
"field": "analysis_types",
|
||||||
|
"value": "modal"
|
||||||
|
},
|
||||||
|
"maps_to": "constraints.min_frequency",
|
||||||
|
"engineering_guidance": "Typically set to avoid resonance with known excitation frequencies (motors, vibration sources)."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "con_04",
|
||||||
|
"category": "constraints",
|
||||||
|
"text": "Do you have a mass budget (maximum allowed mass)?",
|
||||||
|
"help_text": "Enter the mass limit in kg, or skip if not applicable.",
|
||||||
|
"question_type": "numeric",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": false,
|
||||||
|
"min": 0,
|
||||||
|
"units": "kg"
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "not",
|
||||||
|
"condition": {
|
||||||
|
"type": "equals",
|
||||||
|
"field": "objectives[0].goal",
|
||||||
|
"value": "minimize_mass"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"maps_to": "constraints.max_mass",
|
||||||
|
"engineering_guidance": "A mass budget is often required when mass is not the primary objective."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "con_05",
|
||||||
|
"category": "constraints",
|
||||||
|
"text": "How should constraints be handled?",
|
||||||
|
"help_text": "Hard constraints reject any design that violates them. Soft constraints allow violations but penalize the objective.",
|
||||||
|
"question_type": "choice",
|
||||||
|
"options": [
|
||||||
|
{"value": "hard", "label": "Hard constraints (reject violations)"},
|
||||||
|
{"value": "soft", "label": "Soft constraints (penalize violations)"},
|
||||||
|
{"value": "mixed", "label": "Mixed (I'll specify per constraint)"}
|
||||||
|
],
|
||||||
|
"default": "hard",
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "constraint_handling",
|
||||||
|
"engineering_guidance": "Hard constraints are more conservative. Soft constraints allow exploration but may produce infeasible final designs."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "dv_01",
|
||||||
|
"category": "design_variables",
|
||||||
|
"text": "Which parameters should be varied during optimization?",
|
||||||
|
"help_text": "Select from the detected expressions in your model, or type custom names.",
|
||||||
|
"question_type": "parameter_select",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": true,
|
||||||
|
"min_selections": 1,
|
||||||
|
"max_selections": 20
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "design_variables",
|
||||||
|
"engineering_guidance": "More design variables = larger search space. 3-6 is typical for efficient optimization.",
|
||||||
|
"dynamic_options": {
|
||||||
|
"type": "expressions",
|
||||||
|
"source": "introspection.expressions",
|
||||||
|
"filter": "design_variable_heuristics"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "dv_02",
|
||||||
|
"category": "design_variables",
|
||||||
|
"text": "Please confirm or adjust the bounds for each design variable.",
|
||||||
|
"help_text": "For each parameter, verify the min and max values are appropriate.",
|
||||||
|
"question_type": "bounds",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "design_variable_bounds",
|
||||||
|
"engineering_guidance": "Bounds should be physically meaningful. Too wide (>10x range) may slow convergence.",
|
||||||
|
"dynamic_content": {
|
||||||
|
"type": "bounds_table",
|
||||||
|
"source": "answers.design_variables"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "dv_03",
|
||||||
|
"category": "design_variables",
|
||||||
|
"text": "Are there any parameters that should remain fixed (not optimized)?",
|
||||||
|
"help_text": "Select parameters that should keep their current values.",
|
||||||
|
"question_type": "parameter_select",
|
||||||
|
"options": null,
|
||||||
|
"default": null,
|
||||||
|
"validation": {
|
||||||
|
"required": false
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "complexity_is",
|
||||||
|
"value": ["complex"]
|
||||||
|
},
|
||||||
|
"maps_to": "fixed_parameters",
|
||||||
|
"engineering_guidance": "Fix parameters that have regulatory or interface constraints.",
|
||||||
|
"dynamic_options": {
|
||||||
|
"type": "expressions",
|
||||||
|
"source": "introspection.expressions",
|
||||||
|
"filter": "exclude_selected_dvs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "phys_01",
|
||||||
|
"category": "physics_config",
|
||||||
|
"text": "What element type does your mesh use for stress extraction?",
|
||||||
|
"help_text": "This affects which stress extractor is used.",
|
||||||
|
"question_type": "choice",
|
||||||
|
"options": [
|
||||||
|
{"value": "solid", "label": "Solid elements (CTETRA, CHEXA, CPENTA)"},
|
||||||
|
{"value": "shell", "label": "Shell elements (CQUAD4, CTRIA3)"},
|
||||||
|
{"value": "beam", "label": "Beam elements (CBAR, CBEAM)"},
|
||||||
|
{"value": "mixed", "label": "Mixed element types"},
|
||||||
|
{"value": "auto", "label": "Auto-detect from model"}
|
||||||
|
],
|
||||||
|
"default": "auto",
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "or",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "equals", "field": "objectives[0].goal", "value": "minimize_stress"},
|
||||||
|
{"type": "exists", "field": "constraints.max_stress"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"maps_to": "element_type",
|
||||||
|
"engineering_guidance": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "phys_02",
|
||||||
|
"category": "physics_config",
|
||||||
|
"text": "Your model has multiple solution steps. Should all solutions be evaluated?",
|
||||||
|
"help_text": "Some models have static + modal, or multiple load cases.",
|
||||||
|
"question_type": "confirm",
|
||||||
|
"options": null,
|
||||||
|
"default": true,
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "introspection_has",
|
||||||
|
"field": "multiple_solutions"
|
||||||
|
},
|
||||||
|
"maps_to": "solve_all_solutions",
|
||||||
|
"engineering_guidance": "If you have both static and modal analysis, both should typically be solved to get all required outputs."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "opt_01",
|
||||||
|
"category": "optimization_settings",
|
||||||
|
"text": "How many trials should be run?",
|
||||||
|
"help_text": "More trials = better exploration but longer runtime.",
|
||||||
|
"question_type": "choice",
|
||||||
|
"options": [
|
||||||
|
{"value": 50, "label": "50 trials (~quick exploration)"},
|
||||||
|
{"value": 100, "label": "100 trials (standard)"},
|
||||||
|
{"value": 200, "label": "200 trials (thorough)"},
|
||||||
|
{"value": 500, "label": "500 trials (comprehensive)"},
|
||||||
|
{"value": "custom", "label": "Custom number"}
|
||||||
|
],
|
||||||
|
"default": 100,
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "complexity_is",
|
||||||
|
"value": ["moderate", "complex"]
|
||||||
|
},
|
||||||
|
"maps_to": "n_trials",
|
||||||
|
"engineering_guidance": "Rule of thumb: 10-20 trials per design variable minimum. Complex multi-objective needs more."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "opt_02",
|
||||||
|
"category": "optimization_settings",
|
||||||
|
"text": "Would you like to enable neural acceleration?",
|
||||||
|
"help_text": "Neural surrogates can speed up optimization by reducing FEA calls. Requires initial training trials.",
|
||||||
|
"question_type": "confirm",
|
||||||
|
"options": null,
|
||||||
|
"default": false,
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "and",
|
||||||
|
"conditions": [
|
||||||
|
{"type": "greater_than", "field": "n_trials", "value": 100},
|
||||||
|
{"type": "complexity_is", "value": ["moderate", "complex"]}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"maps_to": "use_neural_acceleration",
|
||||||
|
"engineering_guidance": "Neural acceleration is most effective for expensive simulations (>30 sec/eval) with 100+ trials."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "val_01",
|
||||||
|
"category": "validation",
|
||||||
|
"text": "Would you like to run a baseline validation before starting?",
|
||||||
|
"help_text": "This runs a single FEA solve to verify extractors work correctly with nominal parameters.",
|
||||||
|
"question_type": "confirm",
|
||||||
|
"options": null,
|
||||||
|
"default": true,
|
||||||
|
"validation": {
|
||||||
|
"required": true
|
||||||
|
},
|
||||||
|
"condition": null,
|
||||||
|
"maps_to": "run_baseline_validation",
|
||||||
|
"engineering_guidance": "Highly recommended. Catches configuration errors before wasting optimization time."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
262
optimization_engine/interview/schemas/materials_database.json
Normal file
262
optimization_engine/interview/schemas/materials_database.json
Normal file
@@ -0,0 +1,262 @@
|
|||||||
|
{
|
||||||
|
"version": "1.0",
|
||||||
|
"description": "Common engineering materials database for validation and guidance",
|
||||||
|
"materials": [
|
||||||
|
{
|
||||||
|
"id": "al_6061_t6",
|
||||||
|
"names": ["aluminum 6061-t6", "al6061-t6", "6061-t6", "al 6061", "6061 aluminum", "aa6061-t6"],
|
||||||
|
"category": "aluminum",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 2700,
|
||||||
|
"yield_stress_mpa": 276,
|
||||||
|
"ultimate_stress_mpa": 310,
|
||||||
|
"elastic_modulus_gpa": 68.9,
|
||||||
|
"shear_modulus_gpa": 26,
|
||||||
|
"poisson_ratio": 0.33,
|
||||||
|
"fatigue_limit_mpa": 96,
|
||||||
|
"thermal_conductivity_w_mk": 167,
|
||||||
|
"cte_per_k": 23.6e-6
|
||||||
|
},
|
||||||
|
"notes": "Common aerospace aluminum alloy. Good machinability, corrosion resistance.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 3.0,
|
||||||
|
"aerospace": 2.0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "al_2024_t3",
|
||||||
|
"names": ["aluminum 2024-t3", "al2024-t3", "2024-t3", "al 2024", "2024 aluminum"],
|
||||||
|
"category": "aluminum",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 2780,
|
||||||
|
"yield_stress_mpa": 345,
|
||||||
|
"ultimate_stress_mpa": 483,
|
||||||
|
"elastic_modulus_gpa": 73.1,
|
||||||
|
"shear_modulus_gpa": 28,
|
||||||
|
"poisson_ratio": 0.33,
|
||||||
|
"fatigue_limit_mpa": 138,
|
||||||
|
"thermal_conductivity_w_mk": 121,
|
||||||
|
"cte_per_k": 23.2e-6
|
||||||
|
},
|
||||||
|
"notes": "High-strength aerospace aluminum. Excellent fatigue resistance.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5,
|
||||||
|
"aerospace": 2.0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "al_7075_t6",
|
||||||
|
"names": ["aluminum 7075-t6", "al7075-t6", "7075-t6", "al 7075", "7075 aluminum"],
|
||||||
|
"category": "aluminum",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 2810,
|
||||||
|
"yield_stress_mpa": 503,
|
||||||
|
"ultimate_stress_mpa": 572,
|
||||||
|
"elastic_modulus_gpa": 71.7,
|
||||||
|
"shear_modulus_gpa": 26.9,
|
||||||
|
"poisson_ratio": 0.33,
|
||||||
|
"fatigue_limit_mpa": 159,
|
||||||
|
"thermal_conductivity_w_mk": 130,
|
||||||
|
"cte_per_k": 23.4e-6
|
||||||
|
},
|
||||||
|
"notes": "Very high strength aluminum. Used in aircraft structures.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5,
|
||||||
|
"aerospace": 2.0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "steel_304",
|
||||||
|
"names": ["stainless steel 304", "ss304", "304 stainless", "304ss", "aisi 304"],
|
||||||
|
"category": "steel",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 8000,
|
||||||
|
"yield_stress_mpa": 215,
|
||||||
|
"ultimate_stress_mpa": 505,
|
||||||
|
"elastic_modulus_gpa": 193,
|
||||||
|
"shear_modulus_gpa": 77,
|
||||||
|
"poisson_ratio": 0.29,
|
||||||
|
"fatigue_limit_mpa": 240,
|
||||||
|
"thermal_conductivity_w_mk": 16.2,
|
||||||
|
"cte_per_k": 17.3e-6
|
||||||
|
},
|
||||||
|
"notes": "Austenitic stainless steel. Excellent corrosion resistance.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "steel_316",
|
||||||
|
"names": ["stainless steel 316", "ss316", "316 stainless", "316ss", "aisi 316"],
|
||||||
|
"category": "steel",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 8000,
|
||||||
|
"yield_stress_mpa": 290,
|
||||||
|
"ultimate_stress_mpa": 580,
|
||||||
|
"elastic_modulus_gpa": 193,
|
||||||
|
"shear_modulus_gpa": 77,
|
||||||
|
"poisson_ratio": 0.29,
|
||||||
|
"fatigue_limit_mpa": 260,
|
||||||
|
"thermal_conductivity_w_mk": 16.3,
|
||||||
|
"cte_per_k": 16e-6
|
||||||
|
},
|
||||||
|
"notes": "Marine grade stainless steel. Superior corrosion resistance to 304.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "steel_4340",
|
||||||
|
"names": ["steel 4340", "4340 steel", "aisi 4340", "4340"],
|
||||||
|
"category": "steel",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 7850,
|
||||||
|
"yield_stress_mpa": 862,
|
||||||
|
"ultimate_stress_mpa": 1034,
|
||||||
|
"elastic_modulus_gpa": 205,
|
||||||
|
"shear_modulus_gpa": 80,
|
||||||
|
"poisson_ratio": 0.29,
|
||||||
|
"fatigue_limit_mpa": 480,
|
||||||
|
"thermal_conductivity_w_mk": 44.5,
|
||||||
|
"cte_per_k": 12.3e-6
|
||||||
|
},
|
||||||
|
"notes": "High strength alloy steel. Heat treatable.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "steel_a36",
|
||||||
|
"names": ["steel a36", "a36 steel", "astm a36", "a36", "structural steel"],
|
||||||
|
"category": "steel",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 7850,
|
||||||
|
"yield_stress_mpa": 250,
|
||||||
|
"ultimate_stress_mpa": 400,
|
||||||
|
"elastic_modulus_gpa": 200,
|
||||||
|
"shear_modulus_gpa": 79,
|
||||||
|
"poisson_ratio": 0.26,
|
||||||
|
"fatigue_limit_mpa": 160,
|
||||||
|
"thermal_conductivity_w_mk": 51.9,
|
||||||
|
"cte_per_k": 11.7e-6
|
||||||
|
},
|
||||||
|
"notes": "Common structural steel. Low cost, good weldability.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.67,
|
||||||
|
"fatigue": 3.0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "ti_6al_4v",
|
||||||
|
"names": ["titanium 6al-4v", "ti-6al-4v", "ti64", "ti 6-4", "grade 5 titanium"],
|
||||||
|
"category": "titanium",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 4430,
|
||||||
|
"yield_stress_mpa": 880,
|
||||||
|
"ultimate_stress_mpa": 950,
|
||||||
|
"elastic_modulus_gpa": 113.8,
|
||||||
|
"shear_modulus_gpa": 44,
|
||||||
|
"poisson_ratio": 0.342,
|
||||||
|
"fatigue_limit_mpa": 500,
|
||||||
|
"thermal_conductivity_w_mk": 6.7,
|
||||||
|
"cte_per_k": 8.6e-6
|
||||||
|
},
|
||||||
|
"notes": "Common aerospace titanium alloy. Excellent strength-to-weight ratio.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5,
|
||||||
|
"aerospace": 2.0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "ti_cp_grade2",
|
||||||
|
"names": ["titanium grade 2", "cp titanium", "commercially pure titanium", "ti grade 2"],
|
||||||
|
"category": "titanium",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 4510,
|
||||||
|
"yield_stress_mpa": 275,
|
||||||
|
"ultimate_stress_mpa": 345,
|
||||||
|
"elastic_modulus_gpa": 105,
|
||||||
|
"shear_modulus_gpa": 40,
|
||||||
|
"poisson_ratio": 0.37,
|
||||||
|
"fatigue_limit_mpa": 160,
|
||||||
|
"thermal_conductivity_w_mk": 16.4,
|
||||||
|
"cte_per_k": 8.4e-6
|
||||||
|
},
|
||||||
|
"notes": "Commercially pure titanium. Good corrosion resistance, formability.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "inconel_718",
|
||||||
|
"names": ["inconel 718", "in718", "alloy 718", "nickel 718"],
|
||||||
|
"category": "nickel_alloy",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 8190,
|
||||||
|
"yield_stress_mpa": 1100,
|
||||||
|
"ultimate_stress_mpa": 1375,
|
||||||
|
"elastic_modulus_gpa": 200,
|
||||||
|
"shear_modulus_gpa": 77,
|
||||||
|
"poisson_ratio": 0.29,
|
||||||
|
"fatigue_limit_mpa": 600,
|
||||||
|
"thermal_conductivity_w_mk": 11.4,
|
||||||
|
"cte_per_k": 13e-6
|
||||||
|
},
|
||||||
|
"notes": "Nickel superalloy. Excellent high-temperature properties.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 1.5,
|
||||||
|
"fatigue": 2.5
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "zerodur",
|
||||||
|
"names": ["zerodur", "schott zerodur", "zerodur glass ceramic"],
|
||||||
|
"category": "glass_ceramic",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 2530,
|
||||||
|
"yield_stress_mpa": null,
|
||||||
|
"ultimate_stress_mpa": 50,
|
||||||
|
"elastic_modulus_gpa": 90.3,
|
||||||
|
"shear_modulus_gpa": 36.3,
|
||||||
|
"poisson_ratio": 0.24,
|
||||||
|
"fatigue_limit_mpa": null,
|
||||||
|
"thermal_conductivity_w_mk": 1.46,
|
||||||
|
"cte_per_k": 0.05e-6
|
||||||
|
},
|
||||||
|
"notes": "Ultra-low expansion glass ceramic for optics. Brittle - tensile stress limit only.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 4.0,
|
||||||
|
"optical": 8.0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "cfrp_unidirectional",
|
||||||
|
"names": ["carbon fiber", "cfrp", "carbon fiber reinforced polymer", "cfrp ud"],
|
||||||
|
"category": "composite",
|
||||||
|
"properties": {
|
||||||
|
"density_kg_m3": 1600,
|
||||||
|
"yield_stress_mpa": null,
|
||||||
|
"ultimate_stress_mpa": 1500,
|
||||||
|
"elastic_modulus_gpa": 135,
|
||||||
|
"shear_modulus_gpa": 5,
|
||||||
|
"poisson_ratio": 0.3,
|
||||||
|
"fatigue_limit_mpa": 600,
|
||||||
|
"thermal_conductivity_w_mk": 5,
|
||||||
|
"cte_per_k": -0.5e-6
|
||||||
|
},
|
||||||
|
"notes": "Unidirectional carbon fiber. Properties in fiber direction. Highly anisotropic.",
|
||||||
|
"recommended_safety_factors": {
|
||||||
|
"static": 2.0,
|
||||||
|
"fatigue": 3.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
558
optimization_engine/interview/study_blueprint.py
Normal file
558
optimization_engine/interview/study_blueprint.py
Normal file
@@ -0,0 +1,558 @@
|
|||||||
|
"""
|
||||||
|
Study Blueprint
|
||||||
|
|
||||||
|
Data structures for the study blueprint - the validated configuration
|
||||||
|
ready for study generation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
from typing import Dict, List, Any, Optional
|
||||||
|
import json
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class DesignVariable:
|
||||||
|
"""Design variable specification."""
|
||||||
|
parameter: str
|
||||||
|
current_value: float
|
||||||
|
min_value: float
|
||||||
|
max_value: float
|
||||||
|
units: Optional[str] = None
|
||||||
|
is_integer: bool = False
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
return asdict(self)
|
||||||
|
|
||||||
|
def to_config_format(self) -> Dict[str, Any]:
|
||||||
|
"""Convert to optimization_config.json format."""
|
||||||
|
return {
|
||||||
|
"expression_name": self.parameter,
|
||||||
|
"bounds": [self.min_value, self.max_value],
|
||||||
|
"units": self.units or "",
|
||||||
|
"is_integer": self.is_integer,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Objective:
|
||||||
|
"""Optimization objective specification."""
|
||||||
|
name: str
|
||||||
|
goal: str # minimize, maximize, target
|
||||||
|
extractor: str # Extractor ID (e.g., "E1", "E4")
|
||||||
|
extractor_name: Optional[str] = None
|
||||||
|
extractor_params: Optional[Dict[str, Any]] = None
|
||||||
|
weight: float = 1.0
|
||||||
|
target_value: Optional[float] = None # For target objectives
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
return asdict(self)
|
||||||
|
|
||||||
|
def to_config_format(self) -> Dict[str, Any]:
|
||||||
|
"""Convert to optimization_config.json format."""
|
||||||
|
config = {
|
||||||
|
"name": self.name,
|
||||||
|
"type": self.goal,
|
||||||
|
"extractor": self.extractor,
|
||||||
|
"weight": self.weight,
|
||||||
|
}
|
||||||
|
if self.extractor_params:
|
||||||
|
config["extractor_params"] = self.extractor_params
|
||||||
|
if self.target_value is not None:
|
||||||
|
config["target"] = self.target_value
|
||||||
|
return config
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Constraint:
|
||||||
|
"""Optimization constraint specification."""
|
||||||
|
name: str
|
||||||
|
constraint_type: str # max, min
|
||||||
|
threshold: float
|
||||||
|
extractor: str # Extractor ID
|
||||||
|
extractor_name: Optional[str] = None
|
||||||
|
extractor_params: Optional[Dict[str, Any]] = None
|
||||||
|
is_hard: bool = True
|
||||||
|
penalty_weight: float = 1000.0 # For soft constraints
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
return asdict(self)
|
||||||
|
|
||||||
|
def to_config_format(self) -> Dict[str, Any]:
|
||||||
|
"""Convert to optimization_config.json format."""
|
||||||
|
config = {
|
||||||
|
"name": self.name,
|
||||||
|
"type": self.constraint_type,
|
||||||
|
"threshold": self.threshold,
|
||||||
|
"extractor": self.extractor,
|
||||||
|
"hard": self.is_hard,
|
||||||
|
}
|
||||||
|
if self.extractor_params:
|
||||||
|
config["extractor_params"] = self.extractor_params
|
||||||
|
if not self.is_hard:
|
||||||
|
config["penalty_weight"] = self.penalty_weight
|
||||||
|
return config
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class StudyBlueprint:
    """
    Complete study blueprint ready for generation.

    This is the validated configuration that will be used to create
    the study files (optimization_config.json, run_optimization.py, etc.)

    Serialization contract: `to_dict`/`from_dict` round-trip the blueprint,
    `to_config_json` emits the runner's optimization_config.json schema,
    and `to_markdown` renders a human-readable summary. Key names in these
    formats are consumed downstream; do not rename them casually.
    """
    # Study metadata
    study_name: str
    study_description: str = ""
    interview_session_id: str = ""

    # Model paths
    model_path: str = ""
    sim_path: str = ""
    fem_path: str = ""

    # Design space
    design_variables: List[DesignVariable] = field(default_factory=list)

    # Optimization goals
    objectives: List[Objective] = field(default_factory=list)
    constraints: List[Constraint] = field(default_factory=list)

    # Optimization settings
    protocol: str = "protocol_10_single"  # or "protocol_11_multi"
    n_trials: int = 100
    sampler: str = "TPE"
    use_neural_acceleration: bool = False

    # Solver settings
    solver_config: Dict[str, Any] = field(default_factory=dict)
    solve_all_solutions: bool = True

    # Extractors configuration
    extractors_config: Dict[str, Any] = field(default_factory=dict)

    # Validation
    warnings_acknowledged: List[str] = field(default_factory=list)
    baseline_validated: bool = False
    baseline_results: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary (inverse of `from_dict`)."""
        return {
            "study_name": self.study_name,
            "study_description": self.study_description,
            "interview_session_id": self.interview_session_id,
            "model_path": self.model_path,
            "sim_path": self.sim_path,
            "fem_path": self.fem_path,
            # Nested dataclasses are flattened to plain dicts for JSON use.
            "design_variables": [dv.to_dict() for dv in self.design_variables],
            "objectives": [obj.to_dict() for obj in self.objectives],
            "constraints": [con.to_dict() for con in self.constraints],
            "protocol": self.protocol,
            "n_trials": self.n_trials,
            "sampler": self.sampler,
            "use_neural_acceleration": self.use_neural_acceleration,
            "solver_config": self.solver_config,
            "solve_all_solutions": self.solve_all_solutions,
            "extractors_config": self.extractors_config,
            "warnings_acknowledged": self.warnings_acknowledged,
            "baseline_validated": self.baseline_validated,
            "baseline_results": self.baseline_results,
        }

    def to_json(self) -> str:
        """Serialize to JSON string (pretty-printed, 2-space indent)."""
        return json.dumps(self.to_dict(), indent=2)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "StudyBlueprint":
        """Create from dictionary.

        Missing keys fall back to the same defaults as the dataclass
        fields, so a partial dict still yields a usable blueprint.
        """
        design_variables = [
            DesignVariable(**dv) for dv in data.get("design_variables", [])
        ]
        objectives = [
            Objective(**obj) for obj in data.get("objectives", [])
        ]
        constraints = [
            Constraint(**con) for con in data.get("constraints", [])
        ]

        return cls(
            study_name=data.get("study_name", ""),
            study_description=data.get("study_description", ""),
            interview_session_id=data.get("interview_session_id", ""),
            model_path=data.get("model_path", ""),
            sim_path=data.get("sim_path", ""),
            fem_path=data.get("fem_path", ""),
            design_variables=design_variables,
            objectives=objectives,
            constraints=constraints,
            protocol=data.get("protocol", "protocol_10_single"),
            n_trials=data.get("n_trials", 100),
            sampler=data.get("sampler", "TPE"),
            use_neural_acceleration=data.get("use_neural_acceleration", False),
            solver_config=data.get("solver_config", {}),
            solve_all_solutions=data.get("solve_all_solutions", True),
            extractors_config=data.get("extractors_config", {}),
            warnings_acknowledged=data.get("warnings_acknowledged", []),
            baseline_validated=data.get("baseline_validated", False),
            baseline_results=data.get("baseline_results"),
        )

    def to_config_json(self) -> Dict[str, Any]:
        """
        Convert to optimization_config.json format.

        This is the format expected by the optimization runner.
        """
        config = {
            "study_name": self.study_name,
            "description": self.study_description,
            "version": "2.0",

            "model": {
                "part_file": self.model_path,
                "sim_file": self.sim_path,
                "fem_file": self.fem_path,
            },

            "design_variables": [
                dv.to_config_format() for dv in self.design_variables
            ],

            "objectives": [
                obj.to_config_format() for obj in self.objectives
            ],

            "constraints": [
                con.to_config_format() for con in self.constraints
            ],

            "optimization": {
                "n_trials": self.n_trials,
                "sampler": self.sampler,
                "protocol": self.protocol,
                "neural_acceleration": self.use_neural_acceleration,
            },

            "solver": {
                "solve_all": self.solve_all_solutions,
                # solver_config keys are spread in and can override solve_all.
                **self.solver_config,
            },

            "extractors": self.extractors_config,

            # Provenance block; underscore prefix marks it as non-functional.
            "_metadata": {
                "interview_session_id": self.interview_session_id,
                "warnings_acknowledged": self.warnings_acknowledged,
                "baseline_validated": self.baseline_validated,
            }
        }

        return config

    def to_markdown(self) -> str:
        """Generate human-readable markdown summary."""
        lines = []

        lines.append(f"# Study Blueprint: {self.study_name}")
        lines.append("")

        if self.study_description:
            lines.append(f"**Description**: {self.study_description}")
            lines.append("")

        # Design Variables
        lines.append(f"## Design Variables ({len(self.design_variables)})")
        lines.append("")
        lines.append("| Parameter | Current | Min | Max | Units |")
        lines.append("|-----------|---------|-----|-----|-------|")
        for dv in self.design_variables:
            lines.append(f"| {dv.parameter} | {dv.current_value} | {dv.min_value} | {dv.max_value} | {dv.units or '-'} |")
        lines.append("")

        # Objectives
        lines.append(f"## Objectives ({len(self.objectives)})")
        lines.append("")
        lines.append("| Name | Goal | Extractor | Weight |")
        lines.append("|------|------|-----------|--------|")
        for obj in self.objectives:
            lines.append(f"| {obj.name} | {obj.goal} | {obj.extractor} | {obj.weight} |")
        lines.append("")

        # Constraints (section only rendered when any exist)
        if self.constraints:
            lines.append(f"## Constraints ({len(self.constraints)})")
            lines.append("")
            lines.append("| Name | Type | Threshold | Extractor | Hard? |")
            lines.append("|------|------|-----------|-----------|-------|")
            for con in self.constraints:
                # "max" renders as an upper bound, anything else as a lower bound.
                op = "<=" if con.constraint_type == "max" else ">="
                lines.append(f"| {con.name} | {op} | {con.threshold} | {con.extractor} | {'Yes' if con.is_hard else 'No'} |")
            lines.append("")

        # Settings
        lines.append("## Optimization Settings")
        lines.append("")
        lines.append(f"- **Protocol**: {self.protocol}")
        lines.append(f"- **Trials**: {self.n_trials}")
        lines.append(f"- **Sampler**: {self.sampler}")
        lines.append(f"- **Neural Acceleration**: {'Enabled' if self.use_neural_acceleration else 'Disabled'}")
        lines.append("")

        # Validation
        lines.append("## Validation")
        lines.append("")
        lines.append(f"- **Baseline Validated**: {'Yes' if self.baseline_validated else 'No'}")
        if self.warnings_acknowledged:
            lines.append(f"- **Warnings Acknowledged**: {len(self.warnings_acknowledged)}")
            for w in self.warnings_acknowledged:
                lines.append(f"  - {w}")
        lines.append("")

        return "\n".join(lines)

    def validate(self) -> List[str]:
        """
        Validate blueprint completeness.

        Returns:
            List of validation errors (empty if valid)
        """
        errors = []

        if not self.study_name:
            errors.append("Study name is required")

        if not self.design_variables:
            errors.append("At least one design variable is required")

        if not self.objectives:
            errors.append("At least one objective is required")

        for dv in self.design_variables:
            if dv.min_value >= dv.max_value:
                errors.append(f"Invalid bounds for {dv.parameter}: min >= max")

        return errors

    def is_multi_objective(self) -> bool:
        """Check if this is a multi-objective study."""
        return len(self.objectives) > 1

    def get_objective_count(self) -> int:
        """Get number of objectives."""
        return len(self.objectives)

    def get_constraint_count(self) -> int:
        """Get number of constraints."""
        return len(self.constraints)

    def get_design_variable_count(self) -> int:
        """Get number of design variables."""
        return len(self.design_variables)
|
|
||||||
|
|
||||||
|
class BlueprintBuilder:
    """
    Helper class for building StudyBlueprint from interview state.

    Translates the free-form interview answers (plus model introspection
    results) into a StudyBlueprint ready for study generation.
    """

    # (answer key, keyword passed to the extractor mapper, constraint type).
    # Driving constraint construction from one table guarantees every
    # constraint is built identically — previously the "max_mass" branch
    # silently omitted extractor_params, unlike its three siblings.
    _CONSTRAINT_SPECS = (
        ("max_stress", "stress", "max"),
        ("max_displacement", "displacement", "max"),
        ("min_frequency", "frequency", "min"),
        ("max_mass", "mass", "max"),
    )

    def __init__(self):
        """Initialize builder with the shared interview intelligence engine."""
        from .interview_intelligence import InterviewIntelligence
        self.intelligence = InterviewIntelligence()

    def from_interview_state(
        self,
        state: "InterviewState",
        introspection: Optional[Dict[str, Any]] = None
    ) -> "StudyBlueprint":
        """
        Build StudyBlueprint from completed interview state.

        Args:
            state: Completed interview state
            introspection: Optional introspection results (falls back to
                the introspection stored on the state)

        Returns:
            StudyBlueprint ready for generation
        """
        answers = state.answers
        intro = introspection or state.introspection

        design_variables = self._build_design_variables(answers, intro)
        objectives = self._build_objectives(answers, intro)
        constraints = self._build_constraints(answers, intro)

        # Multi-objective studies are routed to the multi-objective protocol.
        protocol = "protocol_11_multi" if len(objectives) > 1 else "protocol_10_single"

        # "custom" means the user deferred the choice; use the default count.
        n_trials = answers.get("n_trials", 100)
        if n_trials == "custom":
            n_trials = 100  # Default

        blueprint = StudyBlueprint(
            study_name=state.study_name,
            study_description=answers.get("problem_description", ""),
            interview_session_id=state.session_id,
            model_path=intro.get("part_file", ""),
            sim_path=intro.get("sim_file", ""),
            fem_path=intro.get("fem_file", ""),
            design_variables=design_variables,
            objectives=objectives,
            constraints=constraints,
            protocol=protocol,
            n_trials=int(n_trials) if isinstance(n_trials, (int, float)) else 100,
            sampler=self.intelligence.suggest_sampler(len(objectives), len(design_variables)),
            use_neural_acceleration=answers.get("use_neural_acceleration", False),
            solve_all_solutions=answers.get("solve_all_solutions", True),
            warnings_acknowledged=state.warnings_acknowledged,
            baseline_validated=answers.get("run_baseline_validation", False),
        )

        return blueprint

    def _build_design_variables(
        self,
        answers: Dict[str, Any],
        intro: Dict[str, Any]
    ) -> List["DesignVariable"]:
        """Build DesignVariable list from answers (dicts or bare parameter names)."""
        design_variables: List["DesignVariable"] = []
        for dv_data in answers.get("design_variables", []):
            if isinstance(dv_data, dict):
                design_variables.append(DesignVariable(
                    parameter=dv_data.get("parameter", ""),
                    current_value=dv_data.get("current_value", 0),
                    min_value=dv_data.get("min_value", 0),
                    max_value=dv_data.get("max_value", 1),
                    units=dv_data.get("units"),
                    is_integer=dv_data.get("is_integer", False),
                ))
            elif isinstance(dv_data, str):
                # Just a parameter name - look up its current value in the
                # introspection results and default to +/-50% bounds.
                expr = self._find_expression(dv_data, intro.get("expressions", []))
                if expr:
                    value = expr.get("value", 0)
                    design_variables.append(DesignVariable(
                        parameter=dv_data,
                        current_value=value,
                        # For negative values the *1.5 product is the smaller
                        # number, so the factors swap to keep min below max.
                        min_value=value * 0.5 if value > 0 else value * 1.5,
                        max_value=value * 1.5 if value > 0 else value * 0.5,
                    ))
        return design_variables

    def _build_objectives(
        self,
        answers: Dict[str, Any],
        intro: Dict[str, Any]
    ) -> List["Objective"]:
        """Build the primary objective plus any secondary objectives."""
        objectives: List["Objective"] = []

        # The primary objective may arrive as a list of dicts, a list of
        # strings, or a bare value; normalize to a dict with a "goal" key.
        primary_goal = answers.get("objectives", [{}])
        if isinstance(primary_goal, list) and primary_goal:
            primary = primary_goal[0] if isinstance(primary_goal[0], dict) else {"goal": primary_goal[0]}
        else:
            primary = {"goal": str(primary_goal)}

        # Map goal description to a concrete result extractor.
        extractor_sel = self.intelligence.extractor_mapper.map_goal_to_extractor(
            primary.get("goal", ""),
            intro
        )

        objectives.append(Objective(
            name=primary.get("name", "primary_objective"),
            goal=self._normalize_goal(primary.get("goal", "")),
            extractor=extractor_sel.extractor_id,
            extractor_name=extractor_sel.extractor_name,
            extractor_params=extractor_sel.params,
            weight=primary.get("weight", 1.0),
        ))

        # Secondary objectives ("none" and empty entries are skipped).
        for sec_goal in answers.get("objectives_secondary", []):
            if sec_goal == "none" or not sec_goal:
                continue

            sec_sel = self.intelligence.extractor_mapper.map_goal_to_extractor(
                sec_goal, intro
            )

            objectives.append(Objective(
                name=f"secondary_{sec_goal}",
                goal=self._normalize_goal(sec_goal),
                extractor=sec_sel.extractor_id,
                extractor_name=sec_sel.extractor_name,
                extractor_params=sec_sel.params,
                weight=0.5,  # Default lower weight for secondary
            ))

        return objectives

    def _build_constraints(
        self,
        answers: Dict[str, Any],
        intro: Dict[str, Any]
    ) -> List["Constraint"]:
        """Build Constraint list from the interview's constraint answers."""
        constraints: List["Constraint"] = []
        constraint_answers = answers.get("constraints", {})
        constraint_handling = answers.get("constraint_handling", "hard")
        is_hard = constraint_handling != "soft"

        for name, keyword, ctype in self._CONSTRAINT_SPECS:
            threshold = constraint_answers.get(name)
            # Skip absent, None, zero, or empty thresholds (matches the
            # original truthiness check on each answer).
            if not threshold:
                continue
            sel = self.intelligence.extractor_mapper.map_constraint_to_extractor(keyword, intro)
            constraints.append(Constraint(
                name=name,
                constraint_type=ctype,
                threshold=threshold,
                extractor=sel.extractor_id,
                extractor_name=sel.extractor_name,
                extractor_params=sel.params,  # fix: previously dropped for max_mass
                is_hard=is_hard,
            ))
        return constraints

    def _find_expression(self, name: str, expressions: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """Find expression dict by its "name" key; None when absent."""
        for expr in expressions:
            if expr.get("name") == name:
                return expr
        return None

    def _normalize_goal(self, goal: str) -> str:
        """Normalize goal string to minimize/maximize/target (pass-through otherwise)."""
        goal_lower = goal.lower()

        if "minimize" in goal_lower or "reduce" in goal_lower:
            return "minimize"
        elif "maximize" in goal_lower or "increase" in goal_lower:
            return "maximize"
        elif "target" in goal_lower:
            return "target"
        else:
            return goal
|
|
||||||
|
|
||||||
|
# Import for type hints
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .interview_state import InterviewState
|
||||||
589
optimization_engine/interview/study_interview.py
Normal file
589
optimization_engine/interview/study_interview.py
Normal file
@@ -0,0 +1,589 @@
|
|||||||
|
"""
|
||||||
|
Study Interview Engine
|
||||||
|
|
||||||
|
Main orchestrator for the interview process.
|
||||||
|
Coordinates question flow, state management, validation, and blueprint generation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Any, Optional, Literal
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from .interview_state import (
|
||||||
|
InterviewState,
|
||||||
|
InterviewPhase,
|
||||||
|
InterviewStateManager,
|
||||||
|
AnsweredQuestion,
|
||||||
|
LogEntry,
|
||||||
|
)
|
||||||
|
from .question_engine import QuestionEngine, Question
|
||||||
|
from .interview_presenter import InterviewPresenter, ClaudePresenter
|
||||||
|
from .engineering_validator import EngineeringValidator, ValidationResult, AntiPattern
|
||||||
|
from .interview_intelligence import InterviewIntelligence
|
||||||
|
from .study_blueprint import StudyBlueprint, BlueprintBuilder
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class InterviewSession:
    """Represents an active interview session."""
    session_id: str  # Unique identifier for this interview run
    study_name: str  # Human-chosen study name
    study_path: Path  # Directory where study artifacts live
    started_at: datetime  # When the session (or its original run) began
    current_phase: InterviewPhase  # Phase at the time this session was built
    introspection: Dict[str, Any]  # Model introspection results, if any
    is_complete: bool = False
    is_resumed: bool = False  # True when this session resumed persisted state
|
|
||||||
|
|
||||||
|
@dataclass
class NextAction:
    """What should happen after processing an answer."""
    action_type: Literal["ask_question", "show_summary", "validate", "generate", "error", "confirm_warning"]
    question: Optional[Question] = None  # Set for ask_question (and error re-asks)
    message: Optional[str] = None  # Rendered text for the user
    warnings: List[str] = field(default_factory=list)  # Unacknowledged warnings
    blueprint: Optional[StudyBlueprint] = None  # Populated when a blueprint is ready
    anti_patterns: List[AntiPattern] = field(default_factory=list)  # For confirm_warning
|
|
||||||
|
|
||||||
|
class StudyInterviewEngine:
|
||||||
|
"""
|
||||||
|
Main orchestrator for study interviews.
|
||||||
|
|
||||||
|
Manages the complete interview lifecycle:
|
||||||
|
1. Start or resume interview
|
||||||
|
2. Present questions via presenter
|
||||||
|
3. Process answers with validation
|
||||||
|
4. Generate blueprint for review
|
||||||
|
5. Handle modifications
|
||||||
|
6. Coordinate study generation
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
    self,
    study_path: Path,
    presenter: Optional[InterviewPresenter] = None
):
    """
    Initialize interview engine.

    Args:
        study_path: Path to the study directory
        presenter: Presentation layer (defaults to ClaudePresenter)
    """
    # Collaborators owned by the engine for the lifetime of the interview.
    self.study_path = Path(study_path)
    self.presenter = presenter or ClaudePresenter()
    self.state_manager = InterviewStateManager(self.study_path)
    self.question_engine = QuestionEngine()
    self.validator = EngineeringValidator()
    self.intelligence = InterviewIntelligence()
    self.blueprint_builder = BlueprintBuilder()

    # Current state (populated by start_interview / process_answer)
    self.state: Optional[InterviewState] = None
    self.introspection: Dict[str, Any] = {}
    self.current_question: Optional[Question] = None
    self.session: Optional[InterviewSession] = None

    # Estimated questions (for progress)
    self.estimated_total_questions = 12  # Will be updated based on complexity
|
|
||||||
|
def start_interview(
    self,
    study_name: str,
    model_path: Optional[Path] = None,
    introspection: Optional[Dict[str, Any]] = None
) -> InterviewSession:
    """
    Start a new interview or resume existing one.

    An unfinished persisted state takes priority: it is resumed as-is and
    the study_name / introspection arguments are ignored in that case.

    Args:
        study_name: Name for the study
        model_path: Path to the NX model (optional; not referenced in this
            method's body — presumably consumed elsewhere, TODO confirm)
        introspection: Pre-computed introspection results (optional)

    Returns:
        InterviewSession representing the active interview
    """
    # Check for existing state
    existing_state = self.state_manager.load_state()

    if existing_state and not existing_state.is_complete():
        # Resume existing interview
        self.state = existing_state
        self.introspection = existing_state.introspection

        self.session = InterviewSession(
            session_id=existing_state.session_id,
            study_name=existing_state.study_name,
            study_path=self.study_path,
            started_at=datetime.fromisoformat(existing_state.started_at),
            current_phase=existing_state.get_phase(),
            introspection=self.introspection,
            is_resumed=True,
        )

        return self.session

    # Start new interview
    self.state = InterviewState(
        session_id=str(uuid.uuid4()),
        study_name=study_name,
        study_path=str(self.study_path),
        current_phase=InterviewPhase.INTROSPECTION.value,
    )

    # Store introspection if provided
    if introspection:
        self.introspection = introspection
        self.state.introspection = introspection
        # Move to problem definition if introspection already done
        self.state.set_phase(InterviewPhase.PROBLEM_DEFINITION)

    # Save initial state
    self.state_manager.save_state(self.state)

    self.session = InterviewSession(
        session_id=self.state.session_id,
        study_name=study_name,
        study_path=self.study_path,
        started_at=datetime.now(),
        current_phase=self.state.get_phase(),
        introspection=self.introspection,
    )

    return self.session
|
|
||||||
|
def get_first_question(self) -> NextAction:
    """
    Get the first question to ask.

    Returns:
        NextAction with the first question, or an "error" action when the
        interview has not been started or no questions are available.
    """
    if self.state is None:
        return NextAction(
            action_type="error",
            message="Interview not started. Call start_interview() first."
        )

    # Ask the question engine what comes first for this state.
    question = self.question_engine.get_next_question(self.state, self.introspection)
    if question is None:
        # Should not happen at the very start of an interview.
        return NextAction(
            action_type="error",
            message="No questions available."
        )

    self.current_question = question

    rendered = self.presenter.present_question(
        question,
        question_number=self.state.current_question_count() + 1,
        total_questions=self.estimated_total_questions,
        category_name=self._get_category_name(question.category),
    )
    return NextAction(
        action_type="ask_question",
        question=question,
        message=rendered,
    )
|
|
||||||
|
def process_answer(self, answer: str) -> NextAction:
    """
    Process user answer and determine next action.

    The answer flows through a fixed pipeline: parse -> validate -> store ->
    phase update -> complexity update -> anti-pattern check -> next question.
    A blocking anti-pattern (severity "error", not yet acknowledged)
    interrupts the flow with a "confirm_warning" action; exhausting the
    question queue finalizes the interview.

    Args:
        answer: User's answer (natural language)

    Returns:
        NextAction indicating what to do next
    """
    if self.state is None or self.current_question is None:
        return NextAction(
            action_type="error",
            message="No active question. Call get_first_question() or get_next_question()."
        )

    question = self.current_question

    # 1. Parse answer based on question type
    parsed = self.presenter.parse_response(answer, question)

    # 2. Validate answer; an invalid one is rejected without mutating state.
    is_valid, error_msg = self.question_engine.validate_answer(parsed, question)
    if not is_valid:
        return NextAction(
            action_type="error",
            message=f"Invalid answer: {error_msg}",
            question=question,  # Re-ask same question
        )

    # 3. Store answer
    self._store_answer(question, answer, parsed)

    # 4. Update phase if needed
    self._update_phase(question)

    # 5. Update complexity after initial questions
    if question.category == "problem_definition":
        self._update_complexity()

    # 6. Check for warnings/anti-patterns
    anti_patterns = self.validator.detect_anti_patterns(self.state, self.introspection)
    new_warnings = [ap.description for ap in anti_patterns if ap.severity in ["error", "warning"]]

    # Filter to only new warnings so acknowledged ones are not re-added.
    existing_warnings = set(self.state.warnings)
    for w in new_warnings:
        if w not in existing_warnings:
            self.state.add_warning(w)

    # 7. Check if we should show anti-pattern warnings
    blocking_patterns = [ap for ap in anti_patterns if ap.severity == "error" and not ap.acknowledged]
    if blocking_patterns:
        return NextAction(
            action_type="confirm_warning",
            message=self._format_anti_pattern_warnings(blocking_patterns),
            anti_patterns=blocking_patterns,
        )

    # 8. Get next question
    next_q = self.question_engine.get_next_question(self.state, self.introspection)

    if next_q is None:
        # Interview complete - generate blueprint
        return self._finalize_interview()

    self.current_question = next_q

    return NextAction(
        action_type="ask_question",
        question=next_q,
        message=self.presenter.present_question(
            next_q,
            question_number=self.state.current_question_count() + 1,
            total_questions=self.estimated_total_questions,
            category_name=self._get_category_name(next_q.category),
        ),
        # Surface non-blocking warnings the user has not acknowledged yet.
        warnings=[w for w in self.state.warnings if w not in self.state.warnings_acknowledged],
    )
|
|
||||||
|
def acknowledge_warnings(self, acknowledged: bool = True) -> NextAction:
    """
    Acknowledge current warnings and continue.

    Args:
        acknowledged: Whether user acknowledged warnings

    Returns:
        NextAction (continue or abort)
    """
    if not acknowledged:
        return NextAction(
            action_type="error",
            message="Interview paused. Please fix the issues and restart, or acknowledge warnings to proceed."
        )

    # Every outstanding warning is now considered acknowledged.
    for warning in self.state.warnings:
        self.state.acknowledge_warning(warning)

    # Resume the normal question flow.
    question = self.question_engine.get_next_question(self.state, self.introspection)
    if question is None:
        # Nothing left to ask - wrap up the interview.
        return self._finalize_interview()

    self.current_question = question

    rendered = self.presenter.present_question(
        question,
        question_number=self.state.current_question_count() + 1,
        total_questions=self.estimated_total_questions,
        category_name=self._get_category_name(question.category),
    )
    return NextAction(
        action_type="ask_question",
        question=question,
        message=rendered,
    )
|
|
||||||
|
def generate_blueprint(self) -> StudyBlueprint:
    """
    Generate study blueprint from interview state.

    Returns:
        StudyBlueprint ready for generation

    Raises:
        ValueError: If no interview state is available.
    """
    if self.state is None:
        raise ValueError("No interview state available")

    blueprint = self.blueprint_builder.from_interview_state(
        self.state,
        self.introspection
    )

    # Persist the blueprint on the interview state for later retrieval.
    self.state.blueprint = blueprint.to_dict()
    self.state_manager.save_state(self.state)
    return blueprint
|
|
||||||
|
def modify_blueprint(self, changes: Dict[str, Any]) -> StudyBlueprint:
    """
    Apply what-if modifications to the blueprint.

    Args:
        changes: Dictionary of changes to apply. Supported keys:
            "n_trials" (coerced to int) and "sampler". The keys
            "add_constraint" / "remove_constraint" are accepted but not
            implemented yet; unknown keys are silently ignored.

    Returns:
        Modified StudyBlueprint

    Raises:
        ValueError: If no blueprint exists yet, or if the modified
            blueprint fails validation.
    """
    if self.state is None or self.state.blueprint is None:
        raise ValueError("No blueprint available to modify")

    blueprint = StudyBlueprint.from_dict(self.state.blueprint)

    # Apply changes
    for key, value in changes.items():
        if key == "n_trials":
            blueprint.n_trials = int(value)
        elif key == "sampler":
            blueprint.sampler = value
        elif key == "add_constraint":
            # Handle adding constraints
            # TODO(review): not implemented - the request is silently dropped.
            pass
        elif key == "remove_constraint":
            # Handle removing constraints
            # TODO(review): not implemented - the request is silently dropped.
            pass
        # Add more modification types as needed

    # Re-validate; invalid modifications are rejected before persisting.
    validation_errors = blueprint.validate()
    if validation_errors:
        raise ValueError(f"Invalid modifications: {validation_errors}")

    # Update state
    self.state.blueprint = blueprint.to_dict()
    self.state_manager.save_state(self.state)

    return blueprint
|
|
||||||
|
def confirm_blueprint(self) -> bool:
    """Mark the interview as COMPLETE and finalize its log.

    Returns:
        True on success; False when there is no active interview state.
    """
    if self.state is None:
        return False

    state = self.state
    state.set_phase(InterviewPhase.COMPLETE)
    self.state_manager.save_state(state)

    # Close out the append-only interview log.
    self.state_manager.finalize_log(state)
    return True
|
||||||
|
|
||||||
|
def get_current_state(self) -> Optional[InterviewState]:
    """Return the active InterviewState, or None when no interview is running."""
    return self.state
|
||||||
|
|
||||||
|
def get_progress(self) -> str:
    """Return a human-readable progress line for the active interview.

    Returns:
        Presenter-formatted progress (answered/estimated + phase label),
        or a placeholder string when no interview is in progress.
    """
    state = self.state
    if state is None:
        return "No active interview"

    answered = state.current_question_count()
    phase_label = self._get_phase_name(state.current_phase)
    return self.presenter.show_progress(answered, self.estimated_total_questions, phase_label)
|
||||||
|
|
||||||
|
def reset_interview(self) -> None:
    """Discard persisted interview state and return to a clean slate."""
    self.state_manager.delete_state()
    # Drop every in-memory session reference as well.
    for attr in ("state", "current_question", "session"):
        setattr(self, attr, None)
|
||||||
|
|
||||||
|
# Private methods
|
||||||
|
|
||||||
|
def _store_answer(self, question: Question, raw: str, parsed: Any) -> None:
    """Persist one question/answer pair: state record, field mapping, and log.

    Args:
        question: The question that was just answered.
        raw: The user's verbatim response text.
        parsed: The validated/parsed value derived from the raw response.
    """
    # 1) Record the answered question on the interview state.
    record = AnsweredQuestion(
        question_id=question.id,
        answered_at=datetime.now().isoformat(),
        raw_response=raw,
        parsed_value=parsed,
    )
    self.state.add_answered_question(record)

    # 2) Route the parsed value into the structured answers store.
    self._map_answer_to_field(question.maps_to, parsed)

    # 3) Append an audit-log entry and persist everything.
    entry = LogEntry(
        timestamp=datetime.now(),
        question_id=question.id,
        question_text=question.text,
        answer_raw=raw,
        answer_parsed=parsed,
    )
    self.state_manager.append_log(entry)
    self.state_manager.save_state(self.state)
|
||||||
|
|
||||||
|
def _map_answer_to_field(self, maps_to: str, value: Any) -> None:
    """Route a parsed answer into self.state.answers at the mapped location.

    Supported addressing forms:
      - indexed: "objectives[0].goal"      -> answers["objectives"][0]["goal"]
      - nested:  "constraints.max_stress"  -> answers["constraints"]["max_stress"]
      - simple:  "study_name"              -> state.set_answer("study_name", value)

    An empty mapping is a no-op.
    """
    if not maps_to:
        return

    answers = self.state.answers

    # Indexed form: "<array>[<idx>].<field>". If the pattern does not match,
    # fall through to the dotted-path handling below.
    if "[" in maps_to:
        import re
        m = re.match(r"(\w+)\[(\d+)\]\.(\w+)", maps_to)
        if m:
            array_name, raw_idx, field = m.groups()
            index = int(raw_idx)
            bucket = answers.setdefault(array_name, [])
            # Pad with empty dicts so the target slot exists.
            while len(bucket) <= index:
                bucket.append({})
            bucket[index][field] = value
            return

    # Nested form: dotted path of dict keys, created on demand.
    if "." in maps_to:
        *parents, leaf = maps_to.split(".")
        node = answers
        for key in parents:
            node = node.setdefault(key, {})
        node[leaf] = value
        return

    # Simple scalar field.
    self.state.set_answer(maps_to, value)
|
||||||
|
|
||||||
|
def _update_phase(self, question: Question) -> None:
    """Advance the interview phase to match the question's category."""
    phase_for_category = {
        "problem_definition": InterviewPhase.PROBLEM_DEFINITION,
        "objectives": InterviewPhase.OBJECTIVES,
        "constraints": InterviewPhase.CONSTRAINTS,
        "design_variables": InterviewPhase.DESIGN_VARIABLES,
        # Physics questions are asked during the design-variable phase.
        "physics_config": InterviewPhase.DESIGN_VARIABLES,
        # Settings and validation questions both map onto the validation phase.
        "optimization_settings": InterviewPhase.VALIDATION,
        "validation": InterviewPhase.VALIDATION,
    }

    target = phase_for_category.get(question.category)
    if target and target != self.state.get_phase():
        self.state.set_phase(target)
|
||||||
|
|
||||||
|
def _update_complexity(self) -> None:
    """Re-estimate problem complexity and resize the expected question count."""
    complexity = self.intelligence.determine_complexity(self.state, self.introspection)
    self.state.complexity = complexity

    # More complex studies warrant a longer interview; anything beyond
    # "simple"/"moderate" gets the full 16-question budget.
    question_budget = {"simple": 8, "moderate": 12}
    self.estimated_total_questions = question_budget.get(complexity, 16)
|
||||||
|
|
||||||
|
def _finalize_interview(self) -> NextAction:
    """Enter the REVIEW phase, build the blueprint, and return a summary action."""
    self.state.set_phase(InterviewPhase.REVIEW)

    summary_blueprint = self.generate_blueprint()

    return NextAction(
        action_type="show_summary",
        message=self.presenter.show_summary(summary_blueprint),
        blueprint=summary_blueprint,
    )
|
||||||
|
|
||||||
|
def _format_anti_pattern_warnings(self, patterns: List[AntiPattern]) -> str:
    """Render detected anti-patterns as a markdown warning block.

    Each pattern is shown with an [X] (error) or [!] (warning) marker, its
    description, and an optional fix suggestion; the block ends with a
    proceed/go-back prompt.
    """
    out = ["**Issues Detected:**", ""]

    for pattern in patterns:
        marker = "X" if pattern.severity == "error" else "!"
        out.append(f"[{marker}] **{pattern.name}**")
        out.append(f" {pattern.description}")
        if pattern.fix_suggestion:
            out.append(f" *Suggestion*: {pattern.fix_suggestion}")
        out.append("")

    out.append("Would you like to proceed anyway? Type **yes** to continue or **no** to go back and fix.")
    return "\n".join(out)
|
||||||
|
|
||||||
|
def _get_category_name(self, category: str) -> str:
    """Translate an internal category key into a display label.

    Unknown categories fall back to a title-cased version of the key.
    """
    display = {
        "problem_definition": "Problem Definition",
        "objectives": "Optimization Goals",
        "constraints": "Constraints",
        "design_variables": "Design Variables",
        "physics_config": "Physics Configuration",
        "optimization_settings": "Optimization Settings",
        "validation": "Validation",
    }
    return display.get(category, category.replace("_", " ").title())
|
||||||
|
|
||||||
|
def _get_phase_name(self, phase: str) -> str:
    """Translate an internal phase key into a display label.

    Unknown phases fall back to a title-cased version of the key.
    """
    display = {
        "introspection": "Model Analysis",
        "problem_definition": "Problem Definition",
        "objectives": "Setting Objectives",
        "constraints": "Defining Constraints",
        "design_variables": "Selecting Variables",
        "validation": "Validation",
        "review": "Review & Confirm",
        "complete": "Complete",
    }
    return display.get(phase, phase.replace("_", " ").title())
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience function for quick interview
|
||||||
|
def run_interview(
    study_path: Path,
    study_name: str,
    introspection: Optional[Dict[str, Any]] = None
) -> StudyInterviewEngine:
    """Convenience wrapper: build an engine and immediately start interviewing.

    Args:
        study_path: Path to the study directory.
        study_name: Name of the study being configured.
        introspection: Optional model-introspection results to seed the interview.

    Returns:
        A StudyInterviewEngine with the interview already started.
    """
    interview_engine = StudyInterviewEngine(study_path)
    interview_engine.start_interview(study_name, introspection=introspection)
    return interview_engine
|
||||||
51
optimization_engine/nx/__init__.py
Normal file
51
optimization_engine/nx/__init__.py
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
"""
|
||||||
|
NX Integration
|
||||||
|
==============
|
||||||
|
|
||||||
|
Siemens NX and Nastran integration modules.
|
||||||
|
|
||||||
|
Modules:
|
||||||
|
- solver: NXSolver for running simulations
|
||||||
|
- updater: NXParameterUpdater for design updates
|
||||||
|
- session_manager: NX session lifecycle management
|
||||||
|
- solve_simulation: Low-level simulation execution
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Lazy imports to avoid import errors when NX modules aren't available
|
||||||
|
def __getattr__(name):
    """Lazily resolve NX exports (PEP 562) so importing this package never
    fails on machines where the NX/Nastran modules are unavailable.

    Each exported name is pulled from its submodule only on first access.
    """
    if name in ('NXSolver', 'run_nx_simulation'):
        from . import solver
        return getattr(solver, name)
    if name in ('NXParameterUpdater', 'update_nx_model'):
        from . import updater
        return getattr(updater, name)
    if name in ('NXSessionManager', 'NXSessionInfo'):
        from . import session_manager
        return getattr(session_manager, name)
    if name in ('ModelCleanup', 'cleanup_substudy'):
        from . import model_cleanup
        return getattr(model_cleanup, name)
    raise AttributeError(f"module 'optimization_engine.nx' has no attribute '{name}'")
|
||||||
|
|
||||||
|
# Public API of the optimization_engine.nx package; every name here is
# resolved lazily by the module-level __getattr__ above.
__all__ = [
    # solver
    'NXSolver',
    'run_nx_simulation',
    # updater
    'NXParameterUpdater',
    'update_nx_model',
    # session_manager
    'NXSessionManager',
    'NXSessionInfo',
    # model_cleanup
    'ModelCleanup',
    'cleanup_substudy',
]
|
||||||
@@ -11,7 +11,7 @@ import subprocess
|
|||||||
import time
|
import time
|
||||||
import shutil
|
import shutil
|
||||||
import os
|
import os
|
||||||
from optimization_engine.nx_session_manager import NXSessionManager
|
from optimization_engine.nx.session_manager import NXSessionManager
|
||||||
|
|
||||||
|
|
||||||
class NXSolver:
|
class NXSolver:
|
||||||
@@ -242,19 +242,28 @@ class NXSolver:
|
|||||||
Format: [unit]name=value
|
Format: [unit]name=value
|
||||||
Example: [mm]whiffle_min=42.5
|
Example: [mm]whiffle_min=42.5
|
||||||
"""
|
"""
|
||||||
# Default unit mapping (could be extended or made configurable)
|
# Default unit mapping - MUST match NX model expression units exactly
|
||||||
|
# Verified against working turbo V1 runs
|
||||||
UNIT_MAPPING = {
|
UNIT_MAPPING = {
|
||||||
# Length parameters (mm)
|
# Length parameters (mm)
|
||||||
'whiffle_min': 'mm',
|
'whiffle_min': 'mm',
|
||||||
'whiffle_triangle_closeness': 'mm',
|
'whiffle_triangle_closeness': 'mm',
|
||||||
'inner_circular_rib_dia': 'mm',
|
'inner_circular_rib_dia': 'mm',
|
||||||
'outer_circular_rib_offset_from_outer': 'mm',
|
'outer_circular_rib_offset_from_outer': 'mm',
|
||||||
|
'Pocket_Radius': 'mm',
|
||||||
|
'center_thickness': 'mm',
|
||||||
|
# Lateral pivot/closeness - mm in NX model (verified from V1)
|
||||||
'lateral_outer_pivot': 'mm',
|
'lateral_outer_pivot': 'mm',
|
||||||
'lateral_inner_pivot': 'mm',
|
'lateral_inner_pivot': 'mm',
|
||||||
'lateral_middle_pivot': 'mm',
|
'lateral_middle_pivot': 'mm',
|
||||||
'lateral_closeness': 'mm',
|
'lateral_closeness': 'mm',
|
||||||
# Angle parameters (degrees)
|
# Rib/face thickness parameters (mm)
|
||||||
'whiffle_outer_to_vertical': 'Degrees',
|
'rib_thickness': 'mm',
|
||||||
|
'ribs_circular_thk': 'mm',
|
||||||
|
'rib_thickness_lateral_truss': 'mm',
|
||||||
|
'mirror_face_thickness': 'mm',
|
||||||
|
# Angle parameters (Degrees) - verified from working V1 runs
|
||||||
|
'whiffle_outer_to_vertical': 'Degrees', # NX expects Degrees (verified V1)
|
||||||
'lateral_inner_angle': 'Degrees',
|
'lateral_inner_angle': 'Degrees',
|
||||||
'lateral_outer_angle': 'Degrees',
|
'lateral_outer_angle': 'Degrees',
|
||||||
'blank_backface_angle': 'Degrees',
|
'blank_backface_angle': 'Degrees',
|
||||||
@@ -1,278 +0,0 @@
|
|||||||
"""
|
|
||||||
Robust OP2 Extraction - Handles pyNastran FATAL flag issues gracefully.
|
|
||||||
|
|
||||||
This module provides a more robust OP2 extraction that:
|
|
||||||
1. Catches pyNastran FATAL flag exceptions
|
|
||||||
2. Checks if eigenvalues were actually extracted despite the flag
|
|
||||||
3. Falls back to F06 extraction if OP2 fails
|
|
||||||
4. Logs detailed failure information
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
from optimization_engine.op2_extractor import robust_extract_first_frequency
|
|
||||||
|
|
||||||
frequency = robust_extract_first_frequency(
|
|
||||||
op2_file=Path("results.op2"),
|
|
||||||
mode_number=1,
|
|
||||||
f06_file=Path("results.f06"), # Optional fallback
|
|
||||||
verbose=True
|
|
||||||
)
|
|
||||||
"""
|
|
||||||
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Optional, Tuple
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
|
|
||||||
def robust_extract_first_frequency(
    op2_file: Path,
    mode_number: int = 1,
    f06_file: Optional[Path] = None,
    verbose: bool = False
) -> float:
    """
    Robustly extract natural frequency from OP2 file, handling pyNastran issues.

    This function attempts multiple strategies:
    1. Standard pyNastran OP2 reading
    2. Force reading with debug=False to ignore FATAL flags
    3. Partial OP2 reading (extract eigenvalues even if FATAL flag exists)
    4. Fallback to F06 file parsing (if provided)

    Args:
        op2_file: Path to OP2 output file
        mode_number: Mode number to extract (1-based index)
        f06_file: Optional F06 file for fallback extraction
        verbose: Print detailed extraction information

    Returns:
        Natural frequency in Hz

    Raises:
        FileNotFoundError: If op2_file does not exist
        ValueError: If frequency cannot be extracted by any method
    """
    # Imported lazily so this module can be loaded on machines without pyNastran.
    from pyNastran.op2.op2 import OP2

    if not op2_file.exists():
        raise FileNotFoundError(f"OP2 file not found: {op2_file}")

    # Strategy 1: Try standard OP2 reading
    try:
        if verbose:
            print(f"[OP2 EXTRACT] Attempting standard read: {op2_file.name}")

        model = OP2()
        model.read_op2(str(op2_file))

        if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
            frequency = _extract_frequency_from_model(model, mode_number)
            if verbose:
                print(f"[OP2 EXTRACT] ✓ Success (standard read): {frequency:.6f} Hz")
            return frequency
        else:
            raise ValueError("No eigenvalues found in OP2 file")

    except Exception as e:
        if verbose:
            print(f"[OP2 EXTRACT] ✗ Standard read failed: {str(e)[:100]}")

        # Check if this is a FATAL flag issue: pyNastran's op2_reader raises
        # when the OP2 carries a FATAL marker even though the eigenvalue data
        # may still be intact and extractable.
        is_fatal_flag = 'FATAL' in str(e) and 'op2_reader' in str(e.__class__.__module__)

        if is_fatal_flag:
            # Strategy 2: Try reading with more lenient settings
            if verbose:
                print(f"[OP2 EXTRACT] Detected pyNastran FATAL flag issue")
                print(f"[OP2 EXTRACT] Attempting partial extraction...")

            try:
                model = OP2()
                # Try to read with debug=False and skip_undefined_matrices=True
                model.read_op2(
                    str(op2_file),
                    debug=False,
                    skip_undefined_matrices=True
                )

                # Check if eigenvalues were extracted despite FATAL
                if hasattr(model, 'eigenvalues') and len(model.eigenvalues) > 0:
                    frequency = _extract_frequency_from_model(model, mode_number)
                    if verbose:
                        print(f"[OP2 EXTRACT] ✓ Success (lenient mode): {frequency:.6f} Hz")
                        print(f"[OP2 EXTRACT] Note: pyNastran reported FATAL but data is valid!")
                    return frequency

            except Exception as e2:
                if verbose:
                    print(f"[OP2 EXTRACT] ✗ Lenient read also failed: {str(e2)[:100]}")

        # Strategy 3: Fallback to F06 parsing (text output of the same run)
        if f06_file and f06_file.exists():
            if verbose:
                print(f"[OP2 EXTRACT] Falling back to F06 extraction: {f06_file.name}")

            try:
                frequency = extract_frequency_from_f06(f06_file, mode_number, verbose=verbose)
                if verbose:
                    print(f"[OP2 EXTRACT] ✓ Success (F06 fallback): {frequency:.6f} Hz")
                return frequency

            except Exception as e3:
                if verbose:
                    print(f"[OP2 EXTRACT] ✗ F06 extraction failed: {str(e3)}")

        # All strategies failed
        raise ValueError(
            f"Could not extract frequency from OP2 file: {op2_file.name}. "
            f"Original error: {str(e)}"
        )
|
|
||||||
|
|
||||||
|
|
||||||
def _extract_frequency_from_model(model, mode_number: int) -> float:
    """Pull the requested mode's natural frequency (Hz) out of a loaded OP2 model.

    Uses the first subcase's eigenvalue table; frequency = sqrt(|lambda|) / (2*pi).

    Raises:
        ValueError: If the model has no eigenvalues or the mode is out of range.
    """
    if not hasattr(model, 'eigenvalues') or len(model.eigenvalues) == 0:
        raise ValueError("No eigenvalues found in model")

    # Results are keyed by subcase; take the first one.
    first_subcase = next(iter(model.eigenvalues))
    table = model.eigenvalues[first_subcase]

    available = len(table.eigenvalues)
    if mode_number > available:
        raise ValueError(
            f"Mode {mode_number} not found. "
            f"Only {available} modes available"
        )

    # abs() guards against tiny negative eigenvalues from numerical noise.
    lam = table.eigenvalues[mode_number - 1]
    angular = np.sqrt(abs(lam))
    return float(angular / (2 * np.pi))
|
|
||||||
|
|
||||||
|
|
||||||
def extract_frequency_from_f06(
    f06_file: Path,
    mode_number: int = 1,
    verbose: bool = False
) -> float:
    """
    Extract natural frequency from F06 text file (fallback method).

    Parses the F06 file to find the eigenvalue results table and extracts
    the CYCLES (Hz) column for the requested mode.

    Args:
        f06_file: Path to F06 output file
        mode_number: Mode number to extract (1-based index)
        verbose: Print extraction details

    Returns:
        Natural frequency in Hz

    Raises:
        FileNotFoundError: If the F06 file does not exist
        ValueError: If the eigenvalue table or requested mode cannot be found
    """
    if not f06_file.exists():
        raise FileNotFoundError(f"F06 file not found: {f06_file}")

    with open(f06_file, 'r', encoding='latin-1', errors='ignore') as f:
        content = f.read()

    # Nastran F06 eigenvalue table looks like:
    #                    R E A L   E I G E N V A L U E S
    #   MODE  EXTRACTION   EIGENVALUE      RADIANS        CYCLES      ...
    #    NO.     ORDER
    #       1        1    -6.602743E+04  2.569656E+02  4.089338E+01   ...
    lines = content.split('\n')

    # Locate the table header. Match the two word groups separately so the
    # search tolerates variable inter-word spacing in the spaced-out header.
    eigenvalue_section_start = None
    for i, line in enumerate(lines):
        if 'R E A L' in line and 'E I G E N V A L U E S' in line:
            eigenvalue_section_start = i
            break

    if eigenvalue_section_start is None:
        raise ValueError("Eigenvalue table not found in F06 file")

    # Parse the table rows (data starts a few lines after the header).
    for i in range(eigenvalue_section_start + 3, min(eigenvalue_section_start + 100, len(lines))):
        raw = lines[i]

        # BUG FIX: test the Fortran carriage-control character on the RAW
        # line. The previous code stripped first, which made every data row
        # whose mode number starts with '1' (mode 1, modes 10-19, ...) look
        # like a page break and be skipped.
        if raw.startswith('1'):  # Page break (carriage control in column 1)
            continue

        line = raw.strip()
        if not line:
            continue

        # Parse line with mode data
        parts = line.split()
        if len(parts) >= 5:
            try:
                mode_num = int(parts[0])
                if mode_num == mode_number:
                    # Frequency is in column 5 (CYCLES, in Hz)
                    frequency = float(parts[4])
                    if verbose:
                        print(f"[F06 EXTRACT] Found mode {mode_num}: {frequency:.6f} Hz")
                    return frequency
            except (ValueError, IndexError):
                continue

    raise ValueError(f"Mode {mode_number} not found in F06 eigenvalue table")
|
|
||||||
|
|
||||||
|
|
||||||
def validate_op2_file(op2_file: Path, f06_file: Optional[Path] = None) -> Tuple[bool, str]:
    """
    Check whether an OP2 file contains usable eigenvalue data.

    Args:
        op2_file: Path to OP2 file
        f06_file: Optional F06 file used as a cross-reference fallback

    Returns:
        (is_valid, message): validation status plus a human-readable explanation
    """
    # Cheap filesystem checks first.
    if not op2_file.exists():
        return False, f"OP2 file does not exist: {op2_file}"
    if op2_file.stat().st_size == 0:
        return False, "OP2 file is empty"

    # Expensive check: attempt an actual frequency extraction.
    try:
        first_freq = robust_extract_first_frequency(
            op2_file,
            mode_number=1,
            f06_file=f06_file,
            verbose=False
        )
    except Exception as e:
        return False, f"Cannot extract data from OP2: {str(e)}"

    return True, f"Valid OP2 file (first frequency: {first_freq:.6f} Hz)"
|
|
||||||
|
|
||||||
|
|
||||||
# Convenience function (same signature as old function for backward compatibility)
|
|
||||||
def extract_first_frequency(op2_file: Path, mode_number: int = 1) -> float:
    """
    Backward-compatible wrapper around robust_extract_first_frequency.

    Looks for a sibling .f06 file next to the OP2 to use as a fallback
    source, then delegates. Use robust_extract_first_frequency directly
    for more control.

    Args:
        op2_file: Path to OP2 file
        mode_number: Mode number (1-based)

    Returns:
        Frequency in Hz
    """
    candidate_f06 = op2_file.with_suffix('.f06')
    fallback = candidate_f06 if candidate_f06.exists() else None

    return robust_extract_first_frequency(
        op2_file,
        mode_number=mode_number,
        f06_file=fallback,
        verbose=False
    )
|
|
||||||
268
optimization_engine/plugins/post_solve/error_tracker.py
Normal file
268
optimization_engine/plugins/post_solve/error_tracker.py
Normal file
@@ -0,0 +1,268 @@
|
|||||||
|
"""
|
||||||
|
Error Tracker Hook - Context Engineering Integration
|
||||||
|
|
||||||
|
Preserves solver errors and failures in context for learning.
|
||||||
|
Based on Manus insight: "leave the wrong turns in the context"
|
||||||
|
|
||||||
|
This hook:
|
||||||
|
1. Captures solver errors and failures
|
||||||
|
2. Classifies error types for playbook categorization
|
||||||
|
3. Extracts relevant F06 content for analysis
|
||||||
|
4. Records errors to session state and LAC
|
||||||
|
|
||||||
|
Hook Point: post_solve
|
||||||
|
Priority: 100 (run early to capture before cleanup)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import Dict, Any, Optional
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
def classify_error(error_msg: str) -> str:
    """
    Classify error type for playbook categorization.

    Args:
        error_msg: Error message text

    Returns:
        Error classification string; the first matching rule wins, and
        unmatched messages fall back to "unknown_error".
    """
    text = error_msg.lower()

    # Ordered rules — earlier entries take priority over later ones.
    rules = [
        ("convergence_failure", ('convergence', 'did not converge', 'diverge')),
        ("mesh_error", ('mesh', 'element', 'distorted', 'jacobian')),
        ("singularity", ('singular', 'matrix', 'pivot', 'ill-conditioned')),
        ("memory_error", ('memory', 'allocation', 'out of memory')),
        ("license_error", ('license', 'checkout')),
        ("boundary_condition_error", ('boundary', 'constraint', 'spc', 'rigid body')),
        ("timeout_error", ('timeout', 'time limit')),
        ("file_error", ('file', 'not found', 'missing')),
    ]
    for label, keywords in rules:
        if any(keyword in text for keyword in keywords):
            return label
    return "unknown_error"
|
||||||
|
|
||||||
|
|
||||||
|
def extract_f06_error(f06_path: Optional[str], max_chars: int = 500) -> str:
    """
    Extract the error section from an F06 file.

    Looks for explicit Nastran error markers first, then for convergence
    failure messages, returning the surrounding context.

    Args:
        f06_path: Path to F06 file (may be None)
        max_chars: Maximum characters to extract after the marker

    Returns:
        Error section content, or "" when nothing is found / file is missing
    """
    if not f06_path:
        return ""

    path = Path(f06_path)
    if not path.exists():
        return ""

    try:
        content = path.read_text(encoding='utf-8', errors='ignore')

        # Explicit Nastran error markers, in priority order.
        markers = (
            "*** USER FATAL",
            "*** SYSTEM FATAL",
            "*** USER WARNING",
            "*** SYSTEM WARNING",
            "FATAL ERROR",
            "ERROR MESSAGE",
        )
        for marker in markers:
            idx = content.find(marker)
            if idx != -1:
                # Include a little leading context around the marker.
                start = max(0, idx - 100)
                end = min(len(content), idx + max_chars)
                return content[start:end].strip()

        # No explicit marker: look for convergence-failure messages instead.
        convergence_patterns = (
            r"CONVERGENCE NOT ACHIEVED",
            r"SOLUTION DID NOT CONVERGE",
            r"DIVERGENCE DETECTED",
        )
        for pattern in convergence_patterns:
            match = re.search(pattern, content, re.IGNORECASE)
            if match:
                idx = match.start()
                start = max(0, idx - 50)
                end = min(len(content), idx + max_chars)
                return content[start:end].strip()

        return ""

    except Exception as e:
        return f"Error reading F06: {str(e)}"
|
||||||
|
|
||||||
|
|
||||||
|
def find_f06_file(working_dir: str, sim_file: str = "") -> Optional[Path]:
    """
    Locate an F06 file inside a working directory.

    Tries a few glob patterns and, when several files match, prefers the
    most recently modified one.

    Args:
        working_dir: Directory to search
        sim_file: Simulation file name (reserved for naming-pattern use)

    Returns:
        Path to the best F06 candidate, or None when nothing matches
    """
    root = Path(working_dir)

    for pattern in ("*.f06", "*-solution*.f06", "*_sim*.f06"):
        candidates = list(root.glob(pattern))
        if candidates:
            # Newest file wins when several solver runs left outputs behind.
            return max(candidates, key=lambda p: p.stat().st_mtime)

    return None
|
||||||
|
|
||||||
|
|
||||||
|
def track_error(context: Dict[str, Any]) -> Dict[str, Any]:
    """
    Hook that preserves errors for context learning.

    Called at post_solve after solver completes.
    Captures error information regardless of success/failure
    to enable learning from both outcomes.

    Args:
        context: Hook context with trial information (trial_number,
            working_dir, output_dir, solver_returncode, optional
            f06_path / sim_file / error flags / design_variables)

    Returns:
        Dictionary with error tracking results
    """
    trial_number = context.get('trial_number', -1)
    working_dir = context.get('working_dir', '.')
    output_dir = context.get('output_dir', working_dir)
    solver_returncode = context.get('solver_returncode', 0)

    # Determine if this is an error case
    # (solver returncode non-zero, or explicit error flag)
    is_error = (
        solver_returncode != 0 or
        context.get('error', False) or
        context.get('solver_failed', False)
    )

    if not is_error:
        # No error to track, but still record success for learning
        return {"error_tracked": False, "trial_success": True}

    # Find and extract F06 error info; fall back to scanning the working
    # directory when the hook caller did not pass an explicit path.
    f06_path = context.get('f06_path')
    if not f06_path:
        f06_file = find_f06_file(working_dir, context.get('sim_file', ''))
        if f06_file:
            f06_path = str(f06_file)

    f06_snippet = extract_f06_error(f06_path)

    # Get error message from context or F06
    error_message = context.get('error_message', '')
    if not error_message and f06_snippet:
        # Extract first line of F06 error as message
        lines = f06_snippet.strip().split('\n')
        error_message = lines[0][:200] if lines else "Unknown solver error"

    # Classify error for playbook categorization
    error_type = classify_error(error_message or f06_snippet)

    # Build error record
    error_info = {
        "trial": trial_number,
        "timestamp": datetime.now().isoformat(),
        "solver_returncode": solver_returncode,
        "error_type": error_type,
        "error_message": error_message,
        "f06_snippet": f06_snippet[:1000] if f06_snippet else "",
        "design_variables": context.get('design_variables', {}),
        "working_dir": working_dir
    }

    # Save to error log (append mode - accumulate errors)
    error_log_path = Path(output_dir) / "error_history.jsonl"
    try:
        error_log_path.parent.mkdir(parents=True, exist_ok=True)
        with open(error_log_path, 'a', encoding='utf-8') as f:
            f.write(json.dumps(error_info) + "\n")
    except Exception as e:
        # Best-effort persistence: a logging failure must not fail the trial.
        print(f"Warning: Could not write error log: {e}")

    # Try to update session state if context engineering is active
    try:
        from optimization_engine.context.session_state import get_session
        session = get_session()
        session.add_error(
            f"Trial {trial_number}: {error_type} - {error_message[:100]}",
            error_type=error_type
        )
    except ImportError:
        pass  # Context module not available

    # Try to record to LAC if available
    try:
        from knowledge_base.lac import get_lac
        lac = get_lac()
        lac.record_insight(
            category="failure",
            context=f"Trial {trial_number} solver error",
            insight=f"{error_type}: {error_message[:200]}",
            confidence=0.7,
            tags=["solver", error_type, "automatic"]
        )
    except ImportError:
        pass  # LAC not available

    return {
        "error_tracked": True,
        "error_type": error_type,
        "error_message": error_message[:200],
        "f06_extracted": bool(f06_snippet)
    }
|
||||||
|
|
||||||
|
|
||||||
|
# Registration metadata read by the hook manager when loading plugins.
HOOK_CONFIG = dict(
    name="error_tracker",
    hook_point="post_solve",
    # High priority so the hook runs early, before any cleanup step can
    # delete the solver artifacts it needs to inspect.
    priority=100,
    enabled=True,
    description="Preserves solver errors for context learning",
)
|
||||||
|
|
||||||
|
|
||||||
|
# Make the function discoverable by hook manager
def get_hook():
    """Registration entry point: expose the ``track_error`` callable.

    The hook manager calls this to obtain the function it should attach
    at the ``post_solve`` hook point (see ``HOOK_CONFIG``).
    """
    return track_error
|
||||||
|
|
||||||
|
|
||||||
|
# For direct plugin discovery
__all__ = [
    "track_error",
    "HOOK_CONFIG",
    "get_hook",
]
|
||||||
25
optimization_engine/processors/__init__.py
Normal file
25
optimization_engine/processors/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
"""
|
||||||
|
Optimization Processors
|
||||||
|
=======================
|
||||||
|
|
||||||
|
Data processing algorithms and ML models.
|
||||||
|
|
||||||
|
Submodules:
|
||||||
|
- surrogates/: Neural network surrogate models
|
||||||
|
- dynamic_response/: Dynamic response processing (random vib, sine sweep)
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Lazy import for surrogates to avoid import errors
|
||||||
|
def __getattr__(name):
|
||||||
|
if name == 'surrogates':
|
||||||
|
from . import surrogates
|
||||||
|
return surrogates
|
||||||
|
elif name == 'AdaptiveCharacterization':
|
||||||
|
from .adaptive_characterization import AdaptiveCharacterization
|
||||||
|
return AdaptiveCharacterization
|
||||||
|
raise AttributeError(f"module 'optimization_engine.processors' has no attribute '{name}'")
|
||||||
|
|
||||||
|
# Public API of the processors package (note: names resolve lazily via
# the module-level __getattr__ above).
__all__ = ['surrogates', 'AdaptiveCharacterization']
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user