feat: tunable ranking, refresh status, chroma backup + admin endpoints
Three small improvements that move the operational baseline forward
without changing the existing trust model.
1. Tunable retrieval ranking weights
- rank_project_match_boost, rank_query_token_step,
rank_query_token_cap, rank_path_high_signal_boost,
rank_path_low_signal_penalty are now Settings fields
- all overridable via ATOCORE_* env vars
- retriever no longer hard-codes 2.0 / 1.18 / 0.72 / 0.08 / 1.32
- lets ranking be tuned per environment as Wave 1 is exercised
without code changes
2. /projects/{name}/refresh status
- refresh_registered_project now returns an overall status field
("ingested", "partial", "nothing_to_ingest") plus roots_ingested
and roots_skipped counters
- ProjectRefreshResponse advertises the new fields so callers can
rely on them
- covers the case where every configured root is missing on disk
3. Chroma cold snapshot + admin backup endpoints
- create_runtime_backup now accepts include_chroma and writes a
cold directory copy of the chroma persistence path
- new list_runtime_backups() and validate_backup() helpers
- new endpoints:
- POST /admin/backup create snapshot (optional chroma)
- GET /admin/backup list snapshots
- GET /admin/backup/{stamp}/validate structural validation
- chroma snapshots are taken under exclusive_ingestion() so a refresh
or ingest cannot race with the cold copy
- backup metadata records what was actually included and how big
Tests:
- 8 new tests covering tunable weights, refresh status branches
(ingested / partial / nothing_to_ingest), chroma snapshot, list,
validate, and the API endpoints (including the lock-acquisition path)
- existing fake refresh stubs in test_api_storage.py updated for the
expanded ProjectRefreshResponse model
- full suite: 105 passing (was 97)
next-steps doc updated to reflect that the chroma snapshot + restore
validation gap from current-state.md is now closed in code; only the
operational retention policy remains.
This commit is contained in:
@@ -34,6 +34,11 @@ from atocore.memory.service import (
|
||||
update_memory,
|
||||
)
|
||||
from atocore.observability.logger import get_logger
|
||||
from atocore.ops.backup import (
|
||||
create_runtime_backup,
|
||||
list_runtime_backups,
|
||||
validate_backup,
|
||||
)
|
||||
from atocore.projects.registry import (
|
||||
build_project_registration_proposal,
|
||||
get_project_registry_template,
|
||||
@@ -69,6 +74,9 @@ class ProjectRefreshResponse(BaseModel):
|
||||
aliases: list[str]
|
||||
description: str
|
||||
purge_deleted: bool
|
||||
status: str
|
||||
roots_ingested: int
|
||||
roots_skipped: int
|
||||
roots: list[dict]
|
||||
|
||||
|
||||
@@ -438,6 +446,49 @@ def api_invalidate_project_state(req: ProjectStateInvalidateRequest) -> dict:
|
||||
return {"status": "invalidated", "project": req.project, "category": req.category, "key": req.key}
|
||||
|
||||
|
||||
class BackupCreateRequest(BaseModel):
    """Request body for ``POST /admin/backup``."""

    # When true, the endpoint also takes a cold copy of the Chroma
    # persistence directory (under the ingestion lock).
    include_chroma: bool = False
|
||||
|
||||
|
||||
@router.post("/admin/backup")
def api_create_backup(req: BackupCreateRequest | None = None) -> dict:
    """Create a runtime backup snapshot.

    When ``include_chroma`` is true the call holds the ingestion lock so a
    safe cold copy of the vector store can be taken without racing against
    refresh or ingest endpoints.

    Returns:
        The backup metadata dict produced by ``create_runtime_backup``.

    Raises:
        HTTPException: 500 when the backup fails for any reason.
    """
    payload = req or BackupCreateRequest()
    try:
        if payload.include_chroma:
            # Serialize against refresh/ingest so the cold Chroma copy
            # cannot observe a half-written vector store.
            with exclusive_ingestion():
                metadata = create_runtime_backup(include_chroma=True)
        else:
            metadata = create_runtime_backup(include_chroma=False)
    except Exception as e:
        log.error("admin_backup_failed", error=str(e))
        # Chain the cause (``from e``) so the 500 keeps the original
        # failure's traceback instead of a bare "during handling" context.
        raise HTTPException(status_code=500, detail=f"Backup failed: {e}") from e
    return metadata
|
||||
|
||||
|
||||
@router.get("/admin/backup")
def api_list_backups() -> dict:
    """List all runtime backups under the configured backup directory."""
    backup_dir = _config.settings.resolved_backup_dir
    return {"backup_dir": str(backup_dir), "backups": list_runtime_backups()}
|
||||
|
||||
|
||||
@router.get("/admin/backup/{stamp}/validate")
def api_validate_backup(stamp: str) -> dict:
    """Validate that a previously created backup is structurally usable.

    Raises:
        HTTPException: 404 when no snapshot exists for ``stamp``.
    """
    result = validate_backup(stamp)
    if result.get("exists", False):
        return result
    raise HTTPException(status_code=404, detail=f"Backup not found: {stamp}")
|
||||
|
||||
|
||||
@router.get("/health")
|
||||
def api_health() -> dict:
|
||||
"""Health check."""
|
||||
|
||||
@@ -40,6 +40,15 @@ class Settings(BaseSettings):
|
||||
context_budget: int = 3000
|
||||
context_top_k: int = 15
|
||||
|
||||
# Retrieval ranking weights (tunable per environment).
|
||||
# All multipliers default to the values used since Wave 1; tighten or
|
||||
# loosen them via ATOCORE_* env vars without touching code.
|
||||
rank_project_match_boost: float = 2.0
|
||||
rank_query_token_step: float = 0.08
|
||||
rank_query_token_cap: float = 1.32
|
||||
rank_path_high_signal_boost: float = 1.18
|
||||
rank_path_low_signal_penalty: float = 0.72
|
||||
|
||||
model_config = {"env_prefix": "ATOCORE_"}
|
||||
|
||||
@property
|
||||
|
||||
@@ -1,8 +1,24 @@
|
||||
"""Create safe runtime backups for the AtoCore machine store."""
|
||||
"""Create safe runtime backups for the AtoCore machine store.
|
||||
|
||||
This module is intentionally conservative:
|
||||
|
||||
- The SQLite snapshot uses the online ``conn.backup()`` API and is safe to
|
||||
call while the database is in use.
|
||||
- The project registry snapshot is a simple file copy of the canonical
|
||||
registry JSON.
|
||||
- The Chroma snapshot is a *cold* directory copy. To stay safe it must be
|
||||
taken while no ingestion is running. The recommended pattern from the API
|
||||
layer is to acquire ``exclusive_ingestion()`` for the duration of the
|
||||
backup so refreshes and ingestions cannot run concurrently with the copy.
|
||||
|
||||
The backup metadata file records what was actually included so restore
|
||||
tooling does not have to guess.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import shutil
|
||||
import sqlite3
|
||||
from datetime import datetime, UTC
|
||||
from pathlib import Path
|
||||
@@ -14,8 +30,17 @@ from atocore.observability.logger import get_logger
|
||||
log = get_logger("backup")
|
||||
|
||||
|
||||
def create_runtime_backup(timestamp: datetime | None = None) -> dict:
|
||||
"""Create a hot backup of the SQLite DB plus registry/config metadata."""
|
||||
def create_runtime_backup(
|
||||
timestamp: datetime | None = None,
|
||||
include_chroma: bool = False,
|
||||
) -> dict:
|
||||
"""Create a hot SQLite backup plus registry/config metadata.
|
||||
|
||||
When ``include_chroma`` is true the Chroma persistence directory is also
|
||||
snapshotted as a cold directory copy. The caller is responsible for
|
||||
ensuring no ingestion is running concurrently. The HTTP layer enforces
|
||||
this by holding ``exclusive_ingestion()`` around the call.
|
||||
"""
|
||||
init_db()
|
||||
now = timestamp or datetime.now(UTC)
|
||||
stamp = now.strftime("%Y%m%dT%H%M%SZ")
|
||||
@@ -23,6 +48,7 @@ def create_runtime_backup(timestamp: datetime | None = None) -> dict:
|
||||
backup_root = _config.settings.resolved_backup_dir / "snapshots" / stamp
|
||||
db_backup_dir = backup_root / "db"
|
||||
config_backup_dir = backup_root / "config"
|
||||
chroma_backup_dir = backup_root / "chroma"
|
||||
metadata_path = backup_root / "backup-metadata.json"
|
||||
|
||||
db_backup_dir.mkdir(parents=True, exist_ok=True)
|
||||
@@ -35,7 +61,26 @@ def create_runtime_backup(timestamp: datetime | None = None) -> dict:
|
||||
registry_path = _config.settings.resolved_project_registry_path
|
||||
if registry_path.exists():
|
||||
registry_snapshot = config_backup_dir / registry_path.name
|
||||
registry_snapshot.write_text(registry_path.read_text(encoding="utf-8"), encoding="utf-8")
|
||||
registry_snapshot.write_text(
|
||||
registry_path.read_text(encoding="utf-8"), encoding="utf-8"
|
||||
)
|
||||
|
||||
chroma_snapshot_path = ""
|
||||
chroma_files_copied = 0
|
||||
chroma_bytes_copied = 0
|
||||
if include_chroma:
|
||||
source_chroma = _config.settings.chroma_path
|
||||
if source_chroma.exists() and source_chroma.is_dir():
|
||||
chroma_backup_dir.mkdir(parents=True, exist_ok=True)
|
||||
chroma_files_copied, chroma_bytes_copied = _copy_directory_tree(
|
||||
source_chroma, chroma_backup_dir
|
||||
)
|
||||
chroma_snapshot_path = str(chroma_backup_dir)
|
||||
else:
|
||||
log.info(
|
||||
"chroma_snapshot_skipped_missing",
|
||||
path=str(source_chroma),
|
||||
)
|
||||
|
||||
metadata = {
|
||||
"created_at": now.isoformat(),
|
||||
@@ -43,14 +88,134 @@ def create_runtime_backup(timestamp: datetime | None = None) -> dict:
|
||||
"db_snapshot_path": str(db_snapshot_path),
|
||||
"db_size_bytes": db_snapshot_path.stat().st_size,
|
||||
"registry_snapshot_path": str(registry_snapshot) if registry_snapshot else "",
|
||||
"vector_store_note": "Chroma hot backup is not included in this script; use a cold snapshot or rebuild/export workflow.",
|
||||
"chroma_snapshot_path": chroma_snapshot_path,
|
||||
"chroma_snapshot_bytes": chroma_bytes_copied,
|
||||
"chroma_snapshot_files": chroma_files_copied,
|
||||
"chroma_snapshot_included": include_chroma,
|
||||
"vector_store_note": (
|
||||
"Chroma snapshot included as cold directory copy."
|
||||
if include_chroma and chroma_snapshot_path
|
||||
else "Chroma hot backup is not included; rerun with include_chroma=True under exclusive_ingestion()."
|
||||
),
|
||||
}
|
||||
metadata_path.write_text(json.dumps(metadata, indent=2, ensure_ascii=True) + "\n", encoding="utf-8")
|
||||
metadata_path.write_text(
|
||||
json.dumps(metadata, indent=2, ensure_ascii=True) + "\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
log.info("runtime_backup_created", backup_root=str(backup_root), db_snapshot=str(db_snapshot_path))
|
||||
log.info(
|
||||
"runtime_backup_created",
|
||||
backup_root=str(backup_root),
|
||||
db_snapshot=str(db_snapshot_path),
|
||||
chroma_included=include_chroma,
|
||||
chroma_bytes=chroma_bytes_copied,
|
||||
)
|
||||
return metadata
|
||||
|
||||
|
||||
def list_runtime_backups() -> list[dict]:
    """List all runtime backups under the configured backup directory."""
    snapshots_root = _config.settings.resolved_backup_dir / "snapshots"
    if not (snapshots_root.exists() and snapshots_root.is_dir()):
        # No snapshot tree yet: nothing has been backed up.
        return []

    backups: list[dict] = []
    # Sorted iteration yields stamps in chronological order, since the
    # directory names are fixed-width UTC timestamps.
    for candidate in sorted(snapshots_root.iterdir()):
        if not candidate.is_dir():
            continue
        meta_file = candidate / "backup-metadata.json"
        has_meta = meta_file.exists()
        record: dict = {
            "stamp": candidate.name,
            "path": str(candidate),
            "has_metadata": has_meta,
        }
        if has_meta:
            try:
                record["metadata"] = json.loads(meta_file.read_text(encoding="utf-8"))
            except json.JSONDecodeError:
                # Surface a corrupt metadata file instead of failing the
                # whole listing.
                record["metadata"] = None
                record["metadata_error"] = "invalid_json"
        backups.append(record)
    return backups
|
||||
|
||||
|
||||
def validate_backup(stamp: str) -> dict:
    """Validate that a previously created backup is structurally usable.

    Checks:
    - the snapshot directory exists
    - the SQLite snapshot is openable and ``PRAGMA integrity_check`` returns ok
    - the registry snapshot, if recorded, parses as JSON
    - the chroma snapshot directory, if recorded, exists

    Returns:
        A dict with per-component flags (``db_ok``, ``registry_ok``,
        ``chroma_ok``), an ``errors`` list, and an overall ``valid`` flag.
        ``registry_ok`` / ``chroma_ok`` stay ``None`` when the metadata never
        recorded that component.
    """
    snapshot_dir = _config.settings.resolved_backup_dir / "snapshots" / stamp
    result: dict = {
        "stamp": stamp,
        "path": str(snapshot_dir),
        "exists": snapshot_dir.exists(),
        "db_ok": False,
        "registry_ok": None,
        "chroma_ok": None,
        "errors": [],
    }
    if not snapshot_dir.exists():
        result["errors"].append("snapshot_directory_missing")
        return result

    metadata_path = snapshot_dir / "backup-metadata.json"
    if not metadata_path.exists():
        result["errors"].append("metadata_missing")
        return result

    try:
        metadata = json.loads(metadata_path.read_text(encoding="utf-8"))
    except json.JSONDecodeError as exc:
        result["errors"].append(f"metadata_invalid_json: {exc}")
        return result
    result["metadata"] = metadata

    # Guard against an empty recorded path: Path("") normalizes to "." which
    # exists, so without this check a missing db_snapshot_path produced a
    # misleading db_open_failed error instead of db_snapshot_missing.
    db_raw = metadata.get("db_snapshot_path", "")
    db_path = Path(db_raw) if db_raw else None
    if db_path is None or not db_path.exists():
        result["errors"].append("db_snapshot_missing")
    else:
        try:
            conn = sqlite3.connect(str(db_path))
            try:
                row = conn.execute("PRAGMA integrity_check").fetchone()
                result["db_ok"] = bool(row and row[0] == "ok")
                if not result["db_ok"]:
                    result["errors"].append(
                        f"db_integrity_check_failed: {row[0] if row else 'no_row'}"
                    )
            finally:
                # sqlite3's context manager only manages transactions; it
                # never closes the connection, so close it explicitly to
                # avoid leaking a handle to the snapshot file.
                conn.close()
        except sqlite3.DatabaseError as exc:
            result["errors"].append(f"db_open_failed: {exc}")

    registry_snapshot_path = metadata.get("registry_snapshot_path", "")
    if registry_snapshot_path:
        registry_path = Path(registry_snapshot_path)
        if not registry_path.exists():
            result["registry_ok"] = False
            result["errors"].append("registry_snapshot_missing")
        else:
            try:
                json.loads(registry_path.read_text(encoding="utf-8"))
                result["registry_ok"] = True
            except json.JSONDecodeError as exc:
                result["registry_ok"] = False
                result["errors"].append(f"registry_invalid_json: {exc}")

    chroma_snapshot_path = metadata.get("chroma_snapshot_path", "")
    if chroma_snapshot_path:
        chroma_dir = Path(chroma_snapshot_path)
        if chroma_dir.exists() and chroma_dir.is_dir():
            result["chroma_ok"] = True
        else:
            result["chroma_ok"] = False
            result["errors"].append("chroma_snapshot_missing")

    # Valid only when every performed check passed.
    result["valid"] = not result["errors"]
    return result
|
||||
|
||||
|
||||
def _backup_sqlite_db(source_path: Path, dest_path: Path) -> None:
|
||||
source_conn = sqlite3.connect(str(source_path))
|
||||
dest_conn = sqlite3.connect(str(dest_path))
|
||||
@@ -61,6 +226,21 @@ def _backup_sqlite_db(source_path: Path, dest_path: Path) -> None:
|
||||
source_conn.close()
|
||||
|
||||
|
||||
def _copy_directory_tree(source: Path, dest: Path) -> tuple[int, int]:
|
||||
"""Copy a directory tree and return (file_count, total_bytes)."""
|
||||
if dest.exists():
|
||||
shutil.rmtree(dest)
|
||||
shutil.copytree(source, dest)
|
||||
|
||||
file_count = 0
|
||||
total_bytes = 0
|
||||
for path in dest.rglob("*"):
|
||||
if path.is_file():
|
||||
file_count += 1
|
||||
total_bytes += path.stat().st_size
|
||||
return file_count, total_bytes
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: create a default (no-chroma) backup and print its metadata."""
    metadata = create_runtime_backup()
    print(json.dumps(metadata, indent=2, ensure_ascii=True))
|
||||
|
||||
@@ -255,12 +255,23 @@ def get_registered_project(project_name: str) -> RegisteredProject | None:
|
||||
|
||||
|
||||
def refresh_registered_project(project_name: str, purge_deleted: bool = False) -> dict:
|
||||
"""Ingest all configured source roots for a registered project."""
|
||||
"""Ingest all configured source roots for a registered project.
|
||||
|
||||
The returned dict carries an overall ``status`` so callers can tell at a
|
||||
glance whether the refresh was fully successful, partial, or did nothing
|
||||
at all because every configured root was missing or not a directory:
|
||||
|
||||
- ``ingested``: every root was a real directory and was ingested
|
||||
- ``partial``: at least one root ingested and at least one was unusable
|
||||
- ``nothing_to_ingest``: no roots were usable
|
||||
"""
|
||||
project = get_registered_project(project_name)
|
||||
if project is None:
|
||||
raise ValueError(f"Unknown project: {project_name}")
|
||||
|
||||
roots = []
|
||||
ingested_count = 0
|
||||
skipped_count = 0
|
||||
for source_ref in project.ingest_roots:
|
||||
resolved = _resolve_ingest_root(source_ref)
|
||||
root_result = {
|
||||
@@ -271,9 +282,11 @@ def refresh_registered_project(project_name: str, purge_deleted: bool = False) -
|
||||
}
|
||||
if not resolved.exists():
|
||||
roots.append({**root_result, "status": "missing"})
|
||||
skipped_count += 1
|
||||
continue
|
||||
if not resolved.is_dir():
|
||||
roots.append({**root_result, "status": "not_directory"})
|
||||
skipped_count += 1
|
||||
continue
|
||||
|
||||
roots.append(
|
||||
@@ -283,12 +296,23 @@ def refresh_registered_project(project_name: str, purge_deleted: bool = False) -
|
||||
"results": ingest_folder(resolved, purge_deleted=purge_deleted),
|
||||
}
|
||||
)
|
||||
ingested_count += 1
|
||||
|
||||
if ingested_count == 0:
|
||||
overall_status = "nothing_to_ingest"
|
||||
elif skipped_count == 0:
|
||||
overall_status = "ingested"
|
||||
else:
|
||||
overall_status = "partial"
|
||||
|
||||
return {
|
||||
"project": project.project_id,
|
||||
"aliases": list(project.aliases),
|
||||
"description": project.description,
|
||||
"purge_deleted": purge_deleted,
|
||||
"status": overall_status,
|
||||
"roots_ingested": ingested_count,
|
||||
"roots_skipped": skipped_count,
|
||||
"roots": roots,
|
||||
}
|
||||
|
||||
|
||||
@@ -173,7 +173,7 @@ def _project_match_boost(project_hint: str, metadata: dict) -> float:
|
||||
|
||||
for candidate in candidate_names:
|
||||
if candidate and candidate in searchable:
|
||||
return 2.0
|
||||
return _config.settings.rank_project_match_boost
|
||||
|
||||
return 1.0
|
||||
|
||||
@@ -198,7 +198,10 @@ def _query_match_boost(query: str, metadata: dict) -> float:
|
||||
matches = sum(1 for token in set(tokens) if token in searchable)
|
||||
if matches <= 0:
|
||||
return 1.0
|
||||
return min(1.0 + matches * 0.08, 1.32)
|
||||
return min(
|
||||
1.0 + matches * _config.settings.rank_query_token_step,
|
||||
_config.settings.rank_query_token_cap,
|
||||
)
|
||||
|
||||
|
||||
def _path_signal_boost(metadata: dict) -> float:
|
||||
@@ -213,9 +216,9 @@ def _path_signal_boost(metadata: dict) -> float:
|
||||
|
||||
multiplier = 1.0
|
||||
if any(hint in searchable for hint in _LOW_SIGNAL_HINTS):
|
||||
multiplier *= 0.72
|
||||
multiplier *= _config.settings.rank_path_low_signal_penalty
|
||||
if any(hint in searchable for hint in _HIGH_SIGNAL_HINTS):
|
||||
multiplier *= 1.18
|
||||
multiplier *= _config.settings.rank_path_high_signal_boost
|
||||
return multiplier
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user