// ATOCore/openclaw-plugins/atocore-capture/index.js
// NOTE(review): scraped web-view chrome ("Files", line/size counts, view
// links) removed from the top of this file — it was raw text and broke parsing.
import { definePluginEntry } from "openclaw/plugin-sdk/core";
// AtoCore API root; override per-deployment via the ATOCORE_BASE_URL env var.
const DEFAULT_BASE_URL = process.env.ATOCORE_BASE_URL || "http://dalidou:8100";
// Prompts shorter than this (after trimming) are not captured — filters out
// one-word commands and accidental sends (see shouldCapturePrompt).
const DEFAULT_MIN_PROMPT_LENGTH = 15;
// Responses longer than this are cut before POSTing (see truncateResponse).
const DEFAULT_MAX_RESPONSE_LENGTH = 50_000;
// --- commit note (scraped git-log residue; preserved as a comment because the
// --- raw pasted text was breaking parsing) -----------------------------------
// feat: Phase 7I + UI refresh (capture form, memory/domain/activity pages, topnav)
// Closes three gaps the user surfaced: (1) OpenClaw agents run blind without
// AtoCore context, (2) mobile/desktop chats can't be captured at all, (3) wiki
// UI hadn't kept up with backend capabilities.
// Phase 7I — OpenClaw two-way bridge
// - Plugin now calls /context/build on before_agent_start and prepends the
//   context pack to event.prompt, so whatever LLM runs underneath (sonnet,
//   opus, codex, local model) answers grounded in AtoCore knowledge. Captured
//   prompt stays the user's original text; fail-open with a 5s timeout.
//   Config-gated via injectContext flag.
// - Plugin version 0.0.0 → 0.2.0; README rewritten.
// UI refresh
// - /wiki/capture — paste-to-ingest form for Claude Desktop / web / mobile /
//   ChatGPT / other. Goes through normal /interactions pipeline with
//   client="claude-desktop|claude-web|claude-mobile|chatgpt|other". Fixes the
//   rotovap/mushroom-on-phone gap.
// - /wiki/memories/{id} (Phase 7E) — full memory detail: content, status,
//   confidence, refs, valid_until, domain_tags (clickable to domain pages),
//   project link, source chunk, graduated-to-entity link, full audit trail,
//   related-by-tag neighbors.
// - /wiki/domains/{tag} (Phase 7F) — cross-project view: all active memories
//   with the given tag grouped by project, sorted by count. Case-insensitive,
//   whitespace-tolerant. Also surfaces graduated entities carrying the tag.
// - /wiki/activity — autonomous-activity timeline feed. Summary chips by
//   action (created/promoted/merged/superseded/decayed/canonicalized) and by
//   actor (auto-dedup-tier1, auto-dedup-tier2, confidence-decay,
//   phase10-auto-promote, transient-to-durable, tag-canon, human-triage).
//   Answers "what has the brain been doing while I was away?"
// - Home refresh: persistent topnav (Home · Activity · Capture · Triage ·
//   Dashboard), "What the brain is doing" snippet above project cards showing
//   recent autonomous-actor counts, link to full activity.
// Tests: +10 (capture page, memory detail + 404, domain cross-project + empty
// + tag normalization, activity feed + groupings, home topnav,
// superseded-source detail after merge). 440 → 450.
// Known next: capture-browser extension for Claude.ai web (bigger project,
// deferred); voice/mobile relay (adjacent).
// Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
// 2026-04-19 10:14:15 -04:00
// -----------------------------------------------------------------------------
// Phase 7I — context injection: cap how much AtoCore context we stuff
// back into the prompt. The /context/build endpoint respects a budget
// parameter too, but we keep a client-side safety net.
const DEFAULT_CONTEXT_CHAR_BUDGET = 4_000;
// Injection is on unless the plugin config sets injectContext: false.
const DEFAULT_INJECT_CONTEXT = true;
/**
 * Normalize an arbitrary value to a trimmed string.
 * Non-string input (null, undefined, numbers, objects) maps to "".
 *
 * @param {*} value - Candidate text of unknown type.
 * @returns {string} Trimmed string, or "" for non-string input.
 */
function trimText(value) {
  if (typeof value !== "string") return "";
  return value.trim();
}
/**
 * Cap a captured response at maxLength characters, appending a marker when
 * truncation happened. Falsy text (empty string, null, undefined) is
 * returned unchanged.
 *
 * @param {string} text - Response text (may be empty/undefined).
 * @param {number} maxLength - Character cap; non-finite values disable truncation.
 * @returns {string} The original text, or a truncated copy ending in "[truncated]".
 */
function truncateResponse(text, maxLength) {
  // Guard: callers compute Number(config.maxResponseLength || …), which is
  // NaN for non-numeric config values. Without this check, `length <= NaN`
  // is false and `slice(0, NaN)` returns "" — silently dropping the entire
  // response and keeping only the "[truncated]" marker.
  if (!text || !Number.isFinite(maxLength)) return text;
  if (text.length <= maxLength) return text;
  return `${text.slice(0, maxLength)}\n\n[truncated]`;
}
/**
 * Decide whether a prompt is worth capturing.
 * Rejects empty/whitespace-only text, text that opens with "<" (markup or
 * tool payloads), and anything shorter than minLength after trimming.
 *
 * @param {string} prompt - Raw user prompt.
 * @param {number} minLength - Minimum trimmed length to capture.
 * @returns {boolean} True when the prompt should be recorded.
 */
function shouldCapturePrompt(prompt, minLength) {
  const cleaned = trimText(prompt);
  if (cleaned === "" || cleaned.startsWith("<")) return false;
  return cleaned.length >= minLength;
}
/**
 * POST one captured prompt/response interaction to AtoCore.
 * Best-effort: failures are logged at debug level and reported via the
 * return value, never thrown. The request is abandoned after 10 s.
 *
 * @param {string} baseUrl - AtoCore API root (trailing slash tolerated).
 * @param {object} payload - Interaction body (prompt, response, client, …).
 * @param {object} [logger] - Optional logger with a debug method.
 * @returns {Promise<boolean>} True only when the server replied 2xx.
 */
async function postInteraction(baseUrl, payload, logger) {
  const endpoint = `${baseUrl.replace(/\/$/, "")}/interactions`;
  try {
    const res = await fetch(endpoint, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(payload),
      signal: AbortSignal.timeout(10_000)
    });
    if (res.ok) return true;
    logger?.debug?.("atocore_capture_post_failed", { status: res.status });
    return false;
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    logger?.debug?.("atocore_capture_post_error", { error: message });
    return false;
  }
}
// NOTE(review): removed a duplicated scraped git-log blob here (identical
// commit message already appears near the top of this file); the raw pasted
// text broke parsing.
/**
 * Phase 7I — fetch an AtoCore context pack for the incoming prompt so the
 * agent answers grounded in what AtoCore already knows.
 *
 * Fail-open by design: on timeout (5 s), network error, non-2xx status, or
 * an empty pack, return null and let the agent run as before — never block
 * the user's turn on AtoCore availability.
 *
 * @param {string} baseUrl - AtoCore API root (trailing slash tolerated).
 * @param {string} prompt - The user's prompt to build context for.
 * @param {string} project - Project hint; "" when unknown.
 * @param {number} charBudget - char_budget sent to /context/build.
 * @param {object} [logger] - Optional logger with a debug method.
 * @returns {Promise<string|null>} Non-empty formatted context, or null.
 */
async function fetchContextPack(baseUrl, prompt, project, charBudget, logger) {
  const endpoint = `${baseUrl.replace(/\/$/, "")}/context/build`;
  const body = JSON.stringify({
    prompt,
    project: project || "",
    char_budget: charBudget
  });
  try {
    const res = await fetch(endpoint, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body,
      signal: AbortSignal.timeout(5_000)
    });
    if (!res.ok) {
      logger?.debug?.("atocore_context_fetch_failed", { status: res.status });
      return null;
    }
    const data = await res.json();
    const pack = trimText(data?.formatted_context || "");
    return pack === "" ? null : pack;
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    logger?.debug?.("atocore_context_fetch_error", { error: message });
    return null;
  }
}
export default definePluginEntry({
  /**
   * Wire up the capture pipeline:
   *   before_agent_start — remember the user's prompt (and optionally inject
   *   an AtoCore context pack into event.prompt);
   *   llm_output — pair the remembered prompt with the model's response and
   *   POST it to AtoCore;
   *   session_end — drop any unmatched pending prompt.
   *
   * @param {object} api - OpenClaw plugin API (logger, getConfig, on).
   */
  register(api) {
    const logger = api.logger;
    // sessionId -> { prompt, sessionId, sessionKey, project } buffered
    // between before_agent_start and llm_output.
    const pendingBySession = new Map();

    api.on("before_agent_start", async (event, ctx) => {
      // Only user-initiated turns are captured; also bail when ctx itself is
      // missing — the original `ctx?.trigger` guard let an undefined ctx
      // through and then threw on `ctx.sessionId` below.
      if (!ctx || (ctx.trigger && ctx.trigger !== "user")) return;
      const config = api.getConfig?.() || {};
      const minPromptLength = Number(config.minPromptLength || DEFAULT_MIN_PROMPT_LENGTH);
      const prompt = trimText(event?.prompt || "");
      if (!shouldCapturePrompt(prompt, minPromptLength)) {
        // Stale prompt from an earlier turn must not pair with this response.
        pendingBySession.delete(ctx.sessionId);
        return;
      }
      // Phase 7I — inject AtoCore context into the agent's prompt so it
      // answers grounded in what the brain already knows. Config-gated
      // (injectContext: false disables). Fail-open.
      const baseUrl = trimText(config.baseUrl) || DEFAULT_BASE_URL;
      const injectContext = config.injectContext !== false && DEFAULT_INJECT_CONTEXT;
      const charBudget = Number(config.contextCharBudget || DEFAULT_CONTEXT_CHAR_BUDGET);
      if (injectContext && event && typeof event === "object") {
        const pack = await fetchContextPack(baseUrl, prompt, "", charBudget, logger);
        if (pack) {
          // Prepend to the event's prompt so whatever LLM the agent
          // delegates to (sonnet, opus, codex, local model) sees grounded
          // info ahead of the user's question.
          event.prompt = `${pack}\n\n---\n\n${prompt}`;
          logger?.debug?.("atocore_context_injected", { chars: pack.length });
        }
      }
      // Record the ORIGINAL user prompt (not the injected version) so
      // captured interactions stay clean for later extraction.
      pendingBySession.set(ctx.sessionId, {
        prompt,
        sessionId: ctx.sessionId,
        sessionKey: ctx.sessionKey || "",
        project: ""
      });
    });

    api.on("llm_output", async (event, ctx) => {
      if (!ctx || (ctx.trigger && ctx.trigger !== "user")) return;
      const pending = pendingBySession.get(ctx.sessionId);
      if (!pending) return;
      // Read config once per event (the original called getConfig twice,
      // risking inconsistent values mid-handler).
      const config = api.getConfig?.() || {};
      const assistantTexts = Array.isArray(event?.assistantTexts) ? event.assistantTexts : [];
      const maxResponseLength = Number(config.maxResponseLength || DEFAULT_MAX_RESPONSE_LENGTH);
      const response = truncateResponse(trimText(assistantTexts.join("\n\n")), maxResponseLength);
      if (!response) return;
      const baseUrl = trimText(config.baseUrl) || DEFAULT_BASE_URL;
      const payload = {
        prompt: pending.prompt,
        response,
        client: "openclaw",
        session_id: pending.sessionKey || pending.sessionId,
        project: pending.project || "",
        reinforce: true
      };
      await postInteraction(baseUrl, payload, logger);
      pendingBySession.delete(ctx.sessionId);
    });

    api.on("session_end", async (event) => {
      // Prevent the pending map from leaking entries across sessions.
      if (event?.sessionId) pendingBySession.delete(event.sessionId);
    });
  }
});