Sync adabot changes on top of origin/main

Includes:
- memory-neo4j: four-phase sleep cycle (dedup, decay, extraction, cleanup)
- memory-neo4j: full plugin implementation with hybrid search
- memory-lancedb: updates and benchmarks
- OpenSpec workflow skills and commands
- Session memory hooks
- Various CLI and config improvements

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Tarun Sukhani
2026-02-04 15:14:46 +00:00
parent 7cfd0aed5f
commit e65d1deedd
59 changed files with 7326 additions and 310 deletions

3
.gitignore vendored
View File

@@ -86,3 +86,6 @@ USER.md
!.agent/workflows/
/local/
package-lock.json
# Claude Code local settings
.claude/

View File

@@ -0,0 +1,667 @@
(() => {
// Idempotency guard: never inject the widget twice on one page.
if (document.getElementById("docs-chat-root")) return;
// Determine if we're on the docs site or embedded elsewhere
const hostname = window.location.hostname;
const isDocsSite = hostname === "localhost" || hostname === "127.0.0.1" ||
hostname.includes("docs.openclaw") || hostname.endsWith(".mintlify.app");
// Off-site embeds load static assets (logo, etc.) from the canonical docs origin.
const assetsBase = isDocsSite ? "" : "https://docs.openclaw.ai";
// Backend endpoint that serves the streaming chat responses.
const apiBase = "https://claw-api.openknot.ai/api";
// Load marked for markdown rendering (via CDN)
// Tracks whether the `marked` markdown library is usable on `window`.
let markedReady = false;
// Inject the marked library from CDN, unless a copy is already present.
const loadMarkdownLib = () => {
  if (window.marked) {
    markedReady = true;
    return;
  }
  const tag = document.createElement("script");
  tag.src = "https://cdn.jsdelivr.net/npm/marked@15.0.6/marked.min.js";
  tag.onload = () => {
    markedReady = Boolean(window.marked);
  };
  tag.onerror = () => console.warn("Failed to load marked library");
  document.head.appendChild(tag);
};
loadMarkdownLib();
// Render markdown to HTML once `marked` has loaded; before that, fall back
// to escaping the text and turning newlines into <br> so streaming output
// stays readable.
// NOTE(review): marked does not sanitize its output — raw HTML in the
// response passes through to innerHTML, so this trusts the chat backend.
const renderMarkdown = (text) => {
  if (markedReady && window.marked) {
    const html = window.marked.parse(text, { async: false, gfm: true, breaks: true });
    // Make every link open in a new tab by rewriting the <a> tags.
    return html.replace(/<a href="/g, '<a target="_blank" rel="noopener" href="');
  }
  // Fallback path: single-pass HTML escape, then preserve line breaks.
  const entities = { "&": "&amp;", "<": "&lt;", ">": "&gt;" };
  return text.replace(/[&<>]/g, (ch) => entities[ch]).replace(/\n/g, "<br>");
};
// Scoped styles for the widget. The palette lives in --docs-chat-* custom
// properties so the dark theme (html[data-theme="dark"]) only overrides
// colors, not component rules. Layout rules cover both the floating-panel
// and the expanded full-height sidebar (.docs-chat-expanded) modes.
const style = document.createElement("style");
style.textContent = `
#docs-chat-root { position: fixed; right: 20px; bottom: 20px; z-index: 9999; font-family: var(--font-body, system-ui, -apple-system, sans-serif); }
#docs-chat-root.docs-chat-expanded { right: 0; bottom: 0; top: 0; }
/* Thin scrollbar styling */
#docs-chat-root ::-webkit-scrollbar { width: 6px; height: 6px; }
#docs-chat-root ::-webkit-scrollbar-track { background: transparent; }
#docs-chat-root ::-webkit-scrollbar-thumb { background: var(--docs-chat-panel-border); border-radius: 3px; }
#docs-chat-root ::-webkit-scrollbar-thumb:hover { background: var(--docs-chat-muted); }
#docs-chat-root * { scrollbar-width: thin; scrollbar-color: var(--docs-chat-panel-border) transparent; }
:root {
--docs-chat-accent: var(--accent, #ff7d60);
--docs-chat-text: #1a1a1a;
--docs-chat-muted: #555;
--docs-chat-panel: rgba(255, 255, 255, 0.92);
--docs-chat-panel-border: rgba(0, 0, 0, 0.1);
--docs-chat-surface: rgba(250, 250, 250, 0.95);
--docs-chat-shadow: 0 18px 50px rgba(0,0,0,0.15);
--docs-chat-code-bg: rgba(0, 0, 0, 0.05);
--docs-chat-assistant-bg: #f5f5f5;
}
html[data-theme="dark"] {
--docs-chat-text: #e8e8e8;
--docs-chat-muted: #aaa;
--docs-chat-panel: rgba(28, 28, 30, 0.95);
--docs-chat-panel-border: rgba(255, 255, 255, 0.12);
--docs-chat-surface: rgba(38, 38, 40, 0.95);
--docs-chat-shadow: 0 18px 50px rgba(0,0,0,0.5);
--docs-chat-code-bg: rgba(255, 255, 255, 0.08);
--docs-chat-assistant-bg: #2a2a2c;
}
#docs-chat-button {
display: inline-flex;
align-items: center;
gap: 10px;
background: linear-gradient(140deg, rgba(255,90,54,0.25), rgba(255,90,54,0.06));
color: var(--docs-chat-text);
border: 1px solid rgba(255,90,54,0.4);
border-radius: 999px;
padding: 10px 14px;
cursor: pointer;
box-shadow: 0 8px 30px rgba(255,90,54, 0.08);
backdrop-filter: blur(12px);
-webkit-backdrop-filter: blur(12px);
font-family: var(--font-pixel, var(--font-body, system-ui, sans-serif));
}
#docs-chat-button span { font-weight: 600; letter-spacing: 0.04em; font-size: 14px; }
.docs-chat-logo { width: 20px; height: 20px; }
#docs-chat-panel {
width: min(440px, calc(100vw - 40px));
height: min(696px, calc(100vh - 80px));
background: var(--docs-chat-panel);
color: var(--docs-chat-text);
border-radius: 16px;
border: 1px solid var(--docs-chat-panel-border);
box-shadow: var(--docs-chat-shadow);
display: none;
flex-direction: column;
overflow: hidden;
backdrop-filter: blur(16px);
-webkit-backdrop-filter: blur(16px);
}
#docs-chat-root.docs-chat-expanded #docs-chat-panel {
width: min(512px, 100vw);
height: 100vh;
height: 100dvh;
border-radius: 18px 0 0 18px;
padding-top: env(safe-area-inset-top, 0);
padding-bottom: env(safe-area-inset-bottom, 0);
}
@media (max-width: 520px) {
#docs-chat-root.docs-chat-expanded #docs-chat-panel {
width: 100vw;
border-radius: 0;
}
#docs-chat-root.docs-chat-expanded { right: 0; left: 0; bottom: 0; top: 0; }
}
#docs-chat-header {
padding: 12px 14px;
font-weight: 600;
font-family: var(--font-pixel, var(--font-body, system-ui, sans-serif));
letter-spacing: 0.03em;
border-bottom: 1px solid var(--docs-chat-panel-border);
display: flex;
justify-content: space-between;
align-items: center;
}
#docs-chat-header-title { display: inline-flex; align-items: center; gap: 8px; }
#docs-chat-header-title span { color: var(--docs-chat-text); font-size: 15px; }
#docs-chat-header-actions { display: inline-flex; align-items: center; gap: 6px; }
.docs-chat-icon-button {
border: 1px solid var(--docs-chat-panel-border);
background: transparent;
color: inherit;
border-radius: 8px;
width: 30px;
height: 30px;
cursor: pointer;
font-size: 16px;
line-height: 1;
}
#docs-chat-messages { flex: 1; padding: 12px 14px; overflow: auto; background: transparent; }
#docs-chat-input {
display: flex;
gap: 8px;
padding: 12px 14px;
border-top: 1px solid var(--docs-chat-panel-border);
background: var(--docs-chat-surface);
backdrop-filter: blur(10px);
-webkit-backdrop-filter: blur(10px);
}
#docs-chat-input textarea {
flex: 1;
resize: none;
border: 1px solid var(--docs-chat-panel-border);
border-radius: 10px;
padding: 9px 10px;
font-size: 14px;
line-height: 1.5;
font-family: inherit;
color: var(--docs-chat-text);
background: var(--docs-chat-surface);
min-height: 42px;
max-height: 120px;
overflow-y: auto;
}
#docs-chat-input textarea::placeholder { color: var(--docs-chat-muted); }
#docs-chat-send {
background: var(--docs-chat-accent);
color: #fff;
border: none;
border-radius: 10px;
padding: 8px 14px;
cursor: pointer;
font-weight: 600;
font-family: inherit;
font-size: 14px;
transition: opacity 0.15s ease;
}
#docs-chat-send:hover { opacity: 0.9; }
#docs-chat-send:active { opacity: 0.8; }
.docs-chat-bubble {
margin-bottom: 10px;
padding: 10px 14px;
border-radius: 12px;
font-size: 14px;
line-height: 1.6;
max-width: 92%;
}
.docs-chat-user {
background: rgba(255, 125, 96, 0.15);
color: var(--docs-chat-text);
border: 1px solid rgba(255, 125, 96, 0.3);
align-self: flex-end;
white-space: pre-wrap;
margin-left: auto;
}
html[data-theme="dark"] .docs-chat-user {
background: rgba(255, 125, 96, 0.18);
border-color: rgba(255, 125, 96, 0.35);
}
.docs-chat-assistant {
background: var(--docs-chat-assistant-bg);
color: var(--docs-chat-text);
border: 1px solid var(--docs-chat-panel-border);
}
/* Markdown content styling for chat bubbles */
.docs-chat-assistant p { margin: 0 0 10px 0; }
.docs-chat-assistant p:last-child { margin-bottom: 0; }
.docs-chat-assistant code {
background: var(--docs-chat-code-bg);
padding: 2px 6px;
border-radius: 5px;
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace;
font-size: 0.9em;
}
.docs-chat-assistant pre {
background: var(--docs-chat-code-bg);
padding: 10px 12px;
border-radius: 8px;
overflow-x: auto;
margin: 6px 0;
font-size: 0.9em;
max-width: 100%;
white-space: pre;
word-wrap: normal;
}
.docs-chat-assistant pre::-webkit-scrollbar-thumb { background: transparent; }
.docs-chat-assistant pre:hover::-webkit-scrollbar-thumb { background: var(--docs-chat-panel-border); }
@media (hover: none) {
.docs-chat-assistant pre { -webkit-overflow-scrolling: touch; }
.docs-chat-assistant pre::-webkit-scrollbar-thumb { background: var(--docs-chat-panel-border); }
}
.docs-chat-assistant pre code {
background: transparent;
padding: 0;
font-size: inherit;
white-space: pre;
word-wrap: normal;
display: block;
}
/* Compact single-line code blocks */
.docs-chat-assistant pre.compact {
margin: 4px 0;
padding: 6px 10px;
}
/* Longer code blocks with copy button need extra top padding */
.docs-chat-assistant pre:not(.compact) {
padding-top: 28px;
}
.docs-chat-assistant a {
color: var(--docs-chat-accent);
text-decoration: underline;
text-underline-offset: 2px;
}
.docs-chat-assistant a:hover { opacity: 0.8; }
.docs-chat-assistant ul, .docs-chat-assistant ol {
margin: 8px 0;
padding-left: 18px;
list-style: none;
}
.docs-chat-assistant li {
margin: 4px 0;
position: relative;
padding-left: 14px;
}
.docs-chat-assistant li::before {
content: "•";
position: absolute;
left: 0;
color: var(--docs-chat-muted);
}
.docs-chat-assistant strong { font-weight: 600; }
.docs-chat-assistant em { font-style: italic; }
.docs-chat-assistant h1, .docs-chat-assistant h2, .docs-chat-assistant h3 {
font-weight: 600;
margin: 12px 0 6px 0;
line-height: 1.3;
}
.docs-chat-assistant h1 { font-size: 1.2em; }
.docs-chat-assistant h2 { font-size: 1.1em; }
.docs-chat-assistant h3 { font-size: 1.05em; }
.docs-chat-assistant blockquote {
border-left: 3px solid var(--docs-chat-accent);
margin: 10px 0;
padding: 4px 12px;
color: var(--docs-chat-muted);
background: var(--docs-chat-code-bg);
border-radius: 0 6px 6px 0;
}
.docs-chat-assistant hr {
border: none;
height: 1px;
background: var(--docs-chat-panel-border);
margin: 12px 0;
}
/* Copy buttons */
.docs-chat-assistant { position: relative; padding-top: 28px; }
.docs-chat-copy-response {
position: absolute;
top: 8px;
right: 8px;
background: var(--docs-chat-surface);
border: 1px solid var(--docs-chat-panel-border);
border-radius: 5px;
padding: 4px 8px;
font-size: 11px;
cursor: pointer;
color: var(--docs-chat-muted);
transition: color 0.15s ease, background 0.15s ease;
}
.docs-chat-copy-response:hover {
color: var(--docs-chat-text);
background: var(--docs-chat-code-bg);
}
.docs-chat-assistant pre {
position: relative;
}
.docs-chat-copy-code {
position: absolute;
top: 8px;
right: 8px;
background: var(--docs-chat-surface);
border: 1px solid var(--docs-chat-panel-border);
border-radius: 4px;
padding: 3px 7px;
font-size: 10px;
cursor: pointer;
color: var(--docs-chat-muted);
transition: color 0.15s ease, background 0.15s ease;
z-index: 1;
}
.docs-chat-copy-code:hover {
color: var(--docs-chat-text);
background: var(--docs-chat-code-bg);
}
/* Resize handle - left edge of expanded panel */
#docs-chat-resize-handle {
position: absolute;
left: 0;
top: 0;
bottom: 0;
width: 6px;
cursor: ew-resize;
z-index: 10;
display: none;
}
#docs-chat-root.docs-chat-expanded #docs-chat-resize-handle { display: block; }
#docs-chat-resize-handle::after {
content: "";
position: absolute;
left: 1px;
top: 50%;
transform: translateY(-50%);
width: 4px;
height: 40px;
border-radius: 2px;
background: var(--docs-chat-panel-border);
opacity: 0;
transition: opacity 0.15s ease, background 0.15s ease;
}
#docs-chat-resize-handle:hover::after,
#docs-chat-resize-handle.docs-chat-dragging::after {
opacity: 1;
background: var(--docs-chat-accent);
}
@media (max-width: 520px) {
#docs-chat-resize-handle { display: none !important; }
}
`;
document.head.appendChild(style);
// Build the widget DOM: a floating launcher button plus a (hidden) chat panel.
const root = document.createElement("div");
root.id = "docs-chat-root";
const button = document.createElement("button");
button.id = "docs-chat-button";
button.type = "button";
button.innerHTML =
`<img class="docs-chat-logo" src="${assetsBase}/assets/pixel-lobster.svg" alt="OpenClaw">` +
`<span>Ask Molty</span>`;
const panel = document.createElement("div");
panel.id = "docs-chat-panel";
panel.style.display = "none";
// Resize handle for expandable sidebar width (desktop only)
const resizeHandle = document.createElement("div");
resizeHandle.id = "docs-chat-resize-handle";
// Header with title plus the expand / clear / close action buttons.
const header = document.createElement("div");
header.id = "docs-chat-header";
header.innerHTML =
`<div id="docs-chat-header-title">` +
`<img class="docs-chat-logo" src="${assetsBase}/assets/pixel-lobster.svg" alt="OpenClaw">` +
`<span>OpenClaw Docs</span>` +
`</div>` +
`<div id="docs-chat-header-actions"></div>`;
const headerActions = header.querySelector("#docs-chat-header-actions");
const expand = document.createElement("button");
expand.type = "button";
expand.className = "docs-chat-icon-button";
expand.setAttribute("aria-label", "Expand");
expand.textContent = "⤢";
const clear = document.createElement("button");
clear.type = "button";
clear.className = "docs-chat-icon-button";
clear.setAttribute("aria-label", "Clear chat");
clear.textContent = "⌫";
const close = document.createElement("button");
close.type = "button";
close.className = "docs-chat-icon-button";
close.setAttribute("aria-label", "Close");
close.textContent = "×";
headerActions.appendChild(expand);
headerActions.appendChild(clear);
headerActions.appendChild(close);
// Scrollable message list plus the input row (textarea + send button).
const messages = document.createElement("div");
messages.id = "docs-chat-messages";
const inputWrap = document.createElement("div");
inputWrap.id = "docs-chat-input";
const textarea = document.createElement("textarea");
textarea.rows = 1;
textarea.placeholder = "Ask about OpenClaw Docs...";
// Auto-expand the textarea as the user types, capped to match the CSS
// max-height (120px). The previous JS cap of 224px was silently clamped by
// the stylesheet, so the two limits now agree.
const autoExpand = () => {
  textarea.style.height = "auto"; // collapse first so scrollHeight shrinks too
  textarea.style.height = Math.min(textarea.scrollHeight, 120) + "px";
};
textarea.addEventListener("input", autoExpand);
const send = document.createElement("button");
send.id = "docs-chat-send";
send.type = "button";
send.textContent = "Send";
// Assemble the panel and attach the whole widget to the page.
inputWrap.appendChild(textarea);
inputWrap.appendChild(send);
panel.appendChild(resizeHandle);
panel.appendChild(header);
panel.appendChild(messages);
panel.appendChild(inputWrap);
root.appendChild(button);
root.appendChild(panel);
document.body.appendChild(root);
// Attach "Copy" affordances to a rendered assistant bubble: one button for
// the whole response, plus one per multi-line code block. Short code blocks
// are styled compact and get no button.
const addCopyButtons = (bubble, rawText) => {
  const makeButton = (className) => {
    const btn = document.createElement("button");
    btn.className = className;
    btn.textContent = "Copy";
    btn.type = "button";
    return btn;
  };
  // Whole-response copy button (rendered top-right of the bubble).
  const copyResponse = makeButton("docs-chat-copy-response");
  copyResponse.addEventListener("click", async () => {
    try {
      await navigator.clipboard.writeText(rawText);
      copyResponse.textContent = "Copied!";
      setTimeout(() => (copyResponse.textContent = "Copy"), 1500);
    } catch (e) {
      copyResponse.textContent = "Failed";
    }
  });
  bubble.appendChild(copyResponse);
  // Per-code-block copy buttons; skip short/single-line blocks.
  for (const pre of bubble.querySelectorAll("pre")) {
    const code = pre.querySelector("code") || pre;
    const text = code.textContent || "";
    const isShort = text.split("\n").length <= 2 && text.length < 100;
    if (isShort) {
      // Tiny snippets get compact styling instead of a copy button.
      pre.classList.add("compact");
      continue;
    }
    const copyCode = makeButton("docs-chat-copy-code");
    copyCode.addEventListener("click", async (e) => {
      e.stopPropagation(); // keep the click from reaching the <pre>
      try {
        await navigator.clipboard.writeText(text);
        copyCode.textContent = "Copied!";
        setTimeout(() => (copyCode.textContent = "Copy"), 1500);
      } catch (err) {
        copyCode.textContent = "Failed";
      }
    });
    pre.appendChild(copyCode);
  }
};
// Append a chat bubble for `role` ("user" or "assistant") and keep the list
// scrolled to the bottom. Markdown is only rendered for assistant messages
// when requested; everything else is inserted as plain text (no HTML).
const addBubble = (text, role, isMarkdown = false) => {
  const bubble = document.createElement("div");
  const roleClass = role === "user" ? "docs-chat-user" : "docs-chat-assistant";
  bubble.className = `docs-chat-bubble ${roleClass}`;
  if (isMarkdown && role === "assistant") {
    bubble.innerHTML = renderMarkdown(text);
  } else {
    bubble.textContent = text;
  }
  messages.appendChild(bubble);
  messages.scrollTop = messages.scrollHeight;
  return bubble;
};
// Expanded-sidebar state plus the user's drag-resized width (null = default).
let isExpanded = false;
let customWidth = null; // User-set width via drag
const MIN_WIDTH = 320;
const MAX_WIDTH = 800;
// Drag-to-resize logic
let isDragging = false;
let startX, startWidth;
// Start a drag from the left-edge handle (only active in expanded mode).
resizeHandle.addEventListener("mousedown", (e) => {
if (!isExpanded) return;
isDragging = true;
startX = e.clientX;
startWidth = panel.offsetWidth;
resizeHandle.classList.add("docs-chat-dragging");
// Lock the cursor and disable text selection for the duration of the drag.
document.body.style.cursor = "ew-resize";
document.body.style.userSelect = "none";
e.preventDefault();
});
document.addEventListener("mousemove", (e) => {
if (!isDragging) return;
// Panel is on right, so dragging left increases width
const delta = startX - e.clientX;
const newWidth = Math.min(MAX_WIDTH, Math.max(MIN_WIDTH, startWidth + delta));
customWidth = newWidth;
panel.style.width = newWidth + "px";
});
// Finish the drag and restore cursor / text-selection behavior.
document.addEventListener("mouseup", () => {
if (!isDragging) return;
isDragging = false;
resizeHandle.classList.remove("docs-chat-dragging");
document.body.style.cursor = "";
document.body.style.userSelect = "";
});
// Show or hide the chat panel (and the inverse for the launcher button),
// re-applying the expanded layout class and any custom width on open.
const setOpen = (isOpen) => {
  if (isOpen) {
    panel.style.display = "flex";
    button.style.display = "none";
  } else {
    panel.style.display = "none";
    button.style.display = "inline-flex";
  }
  root.classList.toggle("docs-chat-expanded", isOpen && isExpanded);
  if (!isOpen) {
    panel.style.width = ""; // back to the stylesheet default when closed
  } else if (isExpanded && customWidth) {
    panel.style.width = `${customWidth}px`;
  }
  if (isOpen) textarea.focus();
};
// Toggle expanded (full-height sidebar) mode. When the panel is currently
// visible, apply or clear the user's custom width immediately.
const setExpanded = (next) => {
  isExpanded = next;
  expand.textContent = isExpanded ? "⤡" : "⤢";
  expand.setAttribute("aria-label", isExpanded ? "Collapse" : "Expand");
  if (panel.style.display === "none") return;
  root.classList.toggle("docs-chat-expanded", isExpanded);
  if (isExpanded && customWidth) {
    panel.style.width = `${customWidth}px`;
  } else if (!isExpanded) {
    panel.style.width = ""; // back to the stylesheet default width
  }
};
// Launcher opens the panel; header buttons toggle expand, clear history, close.
button.addEventListener("click", () => setOpen(true));
expand.addEventListener("click", () => setExpanded(!isExpanded));
clear.addEventListener("click", () => {
messages.innerHTML = "";
});
close.addEventListener("click", () => {
setOpen(false);
// Drop the expanded layout class so reopening starts in compact mode.
root.classList.remove("docs-chat-expanded");
});
// Send the textarea contents to the chat API and stream the reply into an
// assistant bubble, re-rendering markdown as each chunk arrives. Handles
// rate limiting (429), other HTTP errors, non-streaming bodies, and network
// failures, always leaving a rendered message with copy buttons.
const sendMessage = async () => {
  const text = textarea.value.trim();
  if (!text) return;
  textarea.value = "";
  textarea.style.height = "auto"; // Reset auto-expand height after sending
  addBubble(text, "user");
  // Keep the "..." placeholder visible until the first response render.
  // (It was previously cleared immediately, leaving an empty bubble while
  // waiting for the server; every path below overwrites innerHTML anyway.)
  const assistantBubble = addBubble("...", "assistant");
  let fullText = "";
  try {
    const response = await fetch(`${apiBase}/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ message: text }),
    });
    // Handle rate limiting, honoring the server-suggested retry delay.
    if (response.status === 429) {
      const retryAfter = response.headers.get("Retry-After") || "60";
      fullText = `You're asking questions too quickly. Please wait ${retryAfter} seconds before trying again.`;
      assistantBubble.innerHTML = renderMarkdown(fullText);
      addCopyButtons(assistantBubble, fullText);
      return;
    }
    // Handle other errors, preferring the server's message when parseable.
    if (!response.ok) {
      try {
        const errorData = await response.json();
        fullText = errorData.error || "Something went wrong. Please try again.";
      } catch {
        fullText = "Something went wrong. Please try again.";
      }
      assistantBubble.innerHTML = renderMarkdown(fullText);
      addCopyButtons(assistantBubble, fullText);
      return;
    }
    // Non-streaming fallback when the body is not a readable stream.
    if (!response.body) {
      fullText = await response.text();
      assistantBubble.innerHTML = renderMarkdown(fullText);
      addCopyButtons(assistantBubble, fullText);
      return;
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    while (true) {
      const { value, done } = await reader.read();
      if (done) break;
      fullText += decoder.decode(value, { stream: true });
      // Re-render markdown on each chunk for live preview (replaces "...")
      assistantBubble.innerHTML = renderMarkdown(fullText);
      messages.scrollTop = messages.scrollHeight;
    }
    // Flush any remaining buffered bytes (partial UTF-8 sequences)
    fullText += decoder.decode();
    assistantBubble.innerHTML = renderMarkdown(fullText);
    // Add copy buttons only after streaming completes
    addCopyButtons(assistantBubble, fullText);
  } catch (err) {
    fullText = "Failed to reach docs chat API.";
    assistantBubble.innerHTML = renderMarkdown(fullText);
    addCopyButtons(assistantBubble, fullText);
  }
};
send.addEventListener("click", sendMessage);
// Enter sends the message; Shift+Enter inserts a newline as usual.
textarea.addEventListener("keydown", (event) => {
if (event.key === "Enter" && !event.shiftKey) {
event.preventDefault();
sendMessage();
}
});
})();

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env node
/**
* LanceDB performance benchmark
*/
import * as lancedb from "@lancedb/lancedb";
import OpenAI from "openai";
// LanceDB location — overridable via LANCEDB_PATH (matching the convention
// used by export-memories.mjs) so the benchmark is not tied to one
// developer's home directory; the old hard-coded path remains the default.
const LANCEDB_PATH = process.env.LANCEDB_PATH || "/home/tsukhani/.openclaw/memory/lancedb";
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
// NOTE(review): no guard when OPENAI_API_KEY is unset — the first embedding
// call will fail with an auth error rather than a clear startup message.
const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
/**
 * Embed `text` with OpenAI's text-embedding-3-small model.
 * Returns the embedding vector together with the wall-clock latency (ms)
 * of the API call, so callers can report embedding vs. search time.
 */
async function embed(text) {
  const startedAt = Date.now();
  const response = await openai.embeddings.create({
    model: "text-embedding-3-small",
    input: text,
  });
  return { vector: response.data[0].embedding, embedTime: Date.now() - startedAt };
}
/**
 * Run the benchmark: connect to LanceDB, time embedding + vector search for
 * a fixed set of queries (5 runs each, limit=5), then time raw vector search
 * with a pre-computed embedding (10 runs) to isolate database latency from
 * embedding latency. All results are printed to stdout.
 */
async function main() {
console.log("📊 LanceDB Performance Benchmark");
console.log("================================\n");
// Connect
const connectStart = Date.now();
const db = await lancedb.connect(LANCEDB_PATH);
const table = await db.openTable("memories");
const connectTime = Date.now() - connectStart;
console.log(`Connection time: ${connectTime}ms`);
const count = await table.countRows();
console.log(`Total memories: ${count}\n`);
// Test queries
const queries = [
"Tarun's preferences",
"What is the OpenRouter API key location?",
"meeting schedule",
"Abundent Academy training",
"slate blue",
];
console.log("Search benchmarks (5 runs each, limit=5):\n");
for (const query of queries) {
const times = [];
let embedTime = 0;
for (let i = 0; i < 5; i++) {
// The embedding is recomputed each run, but only search time is averaged.
const { vector, embedTime: et } = await embed(query);
embedTime = et; // Last one
const searchStart = Date.now();
const _results = await table.vectorSearch(vector).limit(5).toArray();
const searchTime = Date.now() - searchStart;
times.push(searchTime);
}
const avg = Math.round(times.reduce((a, b) => a + b, 0) / times.length);
const min = Math.min(...times);
const max = Math.max(...times);
console.log(`"${query}"`);
console.log(`  Embedding: ${embedTime}ms`);
console.log(`  Search: avg=${avg}ms, min=${min}ms, max=${max}ms`);
console.log("");
}
// Raw vector search (no embedding)
console.log("\nRaw vector search (pre-computed embedding):");
const { vector } = await embed("test query");
const rawTimes = [];
for (let i = 0; i < 10; i++) {
const start = Date.now();
await table.vectorSearch(vector).limit(5).toArray();
rawTimes.push(Date.now() - start);
}
const avgRaw = Math.round(rawTimes.reduce((a, b) => a + b, 0) / rawTimes.length);
console.log(`  avg=${avgRaw}ms, min=${Math.min(...rawTimes)}ms, max=${Math.max(...rawTimes)}ms`);
}
main().catch(console.error);

View File

@@ -2,6 +2,20 @@ import fs from "node:fs";
import { homedir } from "node:os";
import { join } from "node:path";
export type AutoCaptureConfig = {
enabled: boolean;
/** LLM provider for memory extraction: "openrouter" (default) or "openai" */
provider?: "openrouter" | "openai";
/** LLM model for memory extraction (default: google/gemini-2.0-flash-001) */
model?: string;
/** API key for the LLM provider (supports ${ENV_VAR} syntax) */
apiKey?: string;
/** Base URL for the LLM provider (default: https://openrouter.ai/api/v1) */
baseUrl?: string;
/** Maximum messages to send for extraction (default: 10) */
maxMessages?: number;
};
export type MemoryConfig = {
embedding: {
provider: "openai";
@@ -9,16 +23,29 @@ export type MemoryConfig = {
apiKey: string;
};
dbPath?: string;
autoCapture?: boolean;
/** @deprecated Use autoCapture object instead. Boolean true enables with defaults. */
autoCapture?: boolean | AutoCaptureConfig;
autoRecall?: boolean;
captureMaxChars?: number;
coreMemory?: {
enabled?: boolean;
/** Maximum number of core memories to load */
maxEntries?: number;
/** Minimum importance threshold for core memories */
minImportance?: number;
};
};
export const MEMORY_CATEGORIES = ["preference", "fact", "decision", "entity", "other"] as const;
export const MEMORY_CATEGORIES = [
"preference",
"fact",
"decision",
"entity",
"other",
"core",
] as const;
export type MemoryCategory = (typeof MEMORY_CATEGORIES)[number];
const DEFAULT_MODEL = "text-embedding-3-small";
export const DEFAULT_CAPTURE_MAX_CHARS = 500;
const LEGACY_STATE_DIRS: string[] = [];
function resolveDefaultDbPath(): string {
@@ -93,7 +120,7 @@ export const memoryConfigSchema = {
const cfg = value as Record<string, unknown>;
assertAllowedKeys(
cfg,
["embedding", "dbPath", "autoCapture", "autoRecall", "captureMaxChars"],
["embedding", "dbPath", "autoCapture", "autoRecall", "coreMemory"],
"memory config",
);
@@ -105,13 +132,41 @@ export const memoryConfigSchema = {
const model = resolveEmbeddingModel(embedding);
const captureMaxChars =
typeof cfg.captureMaxChars === "number" ? Math.floor(cfg.captureMaxChars) : undefined;
if (
typeof captureMaxChars === "number" &&
(captureMaxChars < 100 || captureMaxChars > 10_000)
) {
throw new Error("captureMaxChars must be between 100 and 10000");
// Parse autoCapture (supports boolean for backward compat, or object for LLM config)
let autoCapture: MemoryConfig["autoCapture"];
if (cfg.autoCapture === false) {
autoCapture = false;
} else if (cfg.autoCapture === true || cfg.autoCapture === undefined) {
// Legacy boolean or default — enable with defaults
autoCapture = { enabled: true };
} else if (typeof cfg.autoCapture === "object" && !Array.isArray(cfg.autoCapture)) {
const ac = cfg.autoCapture as Record<string, unknown>;
assertAllowedKeys(
ac,
["enabled", "provider", "model", "apiKey", "baseUrl", "maxMessages"],
"autoCapture config",
);
autoCapture = {
enabled: ac.enabled !== false,
provider:
ac.provider === "openai" || ac.provider === "openrouter" ? ac.provider : "openrouter",
model: typeof ac.model === "string" ? ac.model : undefined,
apiKey: typeof ac.apiKey === "string" ? resolveEnvVars(ac.apiKey) : undefined,
baseUrl: typeof ac.baseUrl === "string" ? ac.baseUrl : undefined,
maxMessages: typeof ac.maxMessages === "number" ? ac.maxMessages : undefined,
};
}
// Parse coreMemory
let coreMemory: MemoryConfig["coreMemory"];
if (cfg.coreMemory && typeof cfg.coreMemory === "object" && !Array.isArray(cfg.coreMemory)) {
const bc = cfg.coreMemory as Record<string, unknown>;
assertAllowedKeys(bc, ["enabled", "maxEntries", "minImportance"], "coreMemory config");
coreMemory = {
enabled: bc.enabled === true,
maxEntries: typeof bc.maxEntries === "number" ? bc.maxEntries : 50,
minImportance: typeof bc.minImportance === "number" ? bc.minImportance : 0.5,
};
}
return {
@@ -121,9 +176,10 @@ export const memoryConfigSchema = {
apiKey: resolveEnvVars(embedding.apiKey),
},
dbPath: typeof cfg.dbPath === "string" ? cfg.dbPath : DEFAULT_DB_PATH,
autoCapture: cfg.autoCapture === true,
autoCapture: autoCapture ?? { enabled: true },
autoRecall: cfg.autoRecall !== false,
captureMaxChars: captureMaxChars ?? DEFAULT_CAPTURE_MAX_CHARS,
// Default coreMemory to enabled for consistency with autoCapture/autoRecall
coreMemory: coreMemory ?? { enabled: true, maxEntries: 50, minImportance: 0.5 },
};
},
uiHints: {
@@ -143,19 +199,47 @@ export const memoryConfigSchema = {
placeholder: "~/.openclaw/memory/lancedb",
advanced: true,
},
autoCapture: {
"autoCapture.enabled": {
label: "Auto-Capture",
help: "Automatically capture important information from conversations",
help: "Automatically capture important information from conversations using LLM extraction",
},
"autoCapture.provider": {
label: "Capture LLM Provider",
placeholder: "openrouter",
advanced: true,
help: "LLM provider for memory extraction (openrouter or openai)",
},
"autoCapture.model": {
label: "Capture Model",
placeholder: "google/gemini-2.0-flash-001",
advanced: true,
help: "LLM model for memory extraction (use a fast/cheap model)",
},
"autoCapture.apiKey": {
label: "Capture API Key",
sensitive: true,
advanced: true,
help: "API key for capture LLM (defaults to OpenRouter key from provider config)",
},
autoRecall: {
label: "Auto-Recall",
help: "Automatically inject relevant memories into context",
},
captureMaxChars: {
label: "Capture Max Chars",
help: "Maximum message length eligible for auto-capture",
"coreMemory.enabled": {
label: "Core Memory",
help: "Inject core memories as virtual MEMORY.md at session start (replaces MEMORY.md file)",
},
"coreMemory.maxEntries": {
label: "Max Core Entries",
placeholder: "50",
advanced: true,
placeholder: String(DEFAULT_CAPTURE_MAX_CHARS),
help: "Maximum number of core memories to load",
},
"coreMemory.minImportance": {
label: "Min Core Importance",
placeholder: "0.5",
advanced: true,
help: "Minimum importance threshold for core memories (0-1)",
},
},
};

View File

@@ -0,0 +1,102 @@
#!/usr/bin/env node
/**
* Export memories from LanceDB for migration to memory-neo4j
*
* Usage:
* pnpm exec node export-memories.mjs [output-file.json]
*
* Default output: memories-export.json
*/
import * as lancedb from "@lancedb/lancedb";
import { writeFileSync } from "fs";
// Source database, agent scope, and output location (all overridable).
const LANCEDB_PATH = process.env.LANCEDB_PATH || "/home/tsukhani/.openclaw/memory/lancedb";
const AGENT_ID = process.env.AGENT_ID || "main";
const outputFile = process.argv[2] || "memories-export.json";
console.log("📦 Memory Export Tool (LanceDB)");
console.log(`  LanceDB path: ${LANCEDB_PATH}`);
console.log(`  Output: ${outputFile}`);
console.log("");
// Map one LanceDB row onto the shape expected by the memory-neo4j importer.
// LanceDB stores epoch-ms timestamps; the importer expects ISO-8601 strings,
// and updatedAt is seeded from createdAt since LanceDB has no update time.
function transformMemory(entry) {
  const timestamp = new Date(entry.createdAt).toISOString();
  const { id, text, vector, importance, category } = entry;
  return {
    id,
    text,
    embedding: vector,
    importance,
    category,
    createdAt: timestamp,
    updatedAt: timestamp,
    source: "import",
    extractionStatus: "skipped",
    agentId: AGENT_ID,
  };
}
/**
 * Export every LanceDB memory to JSON for import into memory-neo4j.
 * Writes the full export (embeddings included) plus a "-preview" file in
 * which each embedding is replaced by its dimension count for easy review.
 */
async function main() {
// Load from LanceDB
console.log("📥 Loading from LanceDB...");
const db = await lancedb.connect(LANCEDB_PATH);
const table = await db.openTable("memories");
const count = await table.countRows();
console.log(`  Found ${count} memories`);
// +100 headroom so rows added between countRows() and query() aren't cut off.
const memories = await table
.query()
.limit(count + 100)
.toArray();
console.log(`  Loaded ${memories.length} memories`);
// Transform
console.log("🔄 Transforming...");
const transformed = memories.map(transformMemory);
// Stats
const stats = {};
transformed.forEach((m) => {
stats[m.category] = (stats[m.category] || 0) + 1;
});
console.log("  Categories:", stats);
// Export
console.log(`📤 Exporting to ${outputFile}...`);
const exportData = {
exportedAt: new Date().toISOString(),
sourcePlugin: "memory-lancedb",
targetPlugin: "memory-neo4j",
agentId: AGENT_ID,
// Fall back to the standard OpenAI small-embedding width when table is empty.
vectorDim: transformed[0]?.embedding?.length || 1536,
count: transformed.length,
stats,
memories: transformed,
};
writeFileSync(outputFile, JSON.stringify(exportData, null, 2));
// Also write a preview without embeddings
const previewFile = outputFile.replace(".json", "-preview.json");
const preview = {
...exportData,
memories: transformed.map((m) => ({
...m,
embedding: `[${m.embedding?.length} dims]`,
})),
};
writeFileSync(previewFile, JSON.stringify(preview, null, 2));
console.log(`✅ Exported ${transformed.length} memories`);
console.log(
`  Full export: ${outputFile} (${(JSON.stringify(exportData).length / 1024 / 1024).toFixed(2)} MB)`,
);
console.log(`  Preview: ${previewFile}`);
}
main().catch((err) => {
console.error("❌ Error:", err.message);
process.exit(1);
});

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,26 @@
// Quick inspection script: connect to the local LanceDB store and print the
// memory count, a per-category breakdown, and a handful of sample entries.
import * as lancedb from "@lancedb/lancedb";
const db = await lancedb.connect("/home/tsukhani/.openclaw/memory/lancedb");
const tables = await db.tableNames();
console.log("Tables:", tables);
if (tables.includes("memories")) {
const table = await db.openTable("memories");
const count = await table.countRows();
console.log("Memory count:", count);
// NOTE(review): only the first 200 rows are sampled, so the category
// breakdown is partial when the table is larger — confirm that's intended.
const all = await table.query().limit(200).toArray();
const stats = { preference: 0, fact: 0, decision: 0, entity: 0, other: 0, core: 0 };
all.forEach((e) => {
stats[e.category] = (stats[e.category] || 0) + 1;
});
console.log("\nCategory breakdown:", stats);
console.log("\nSample entries:");
all.slice(0, 5).forEach((e, i) => {
console.log(`${i + 1}. [${e.category}] ${(e.text || "").substring(0, 100)}...`);
console.log(`  id: ${e.id}, importance: ${e.importance}, vectorDim: ${e.vector?.length}`);
});
}

View File

@@ -26,11 +26,21 @@
"label": "Auto-Recall",
"help": "Automatically inject relevant memories into context"
},
"captureMaxChars": {
"label": "Capture Max Chars",
"help": "Maximum message length eligible for auto-capture",
"coreMemory.enabled": {
"label": "Core Memory",
"help": "Inject core memories as virtual MEMORY.md at session start (replaces MEMORY.md file)"
},
"coreMemory.maxEntries": {
"label": "Max Core Entries",
"placeholder": "50",
"advanced": true,
"help": "Maximum number of core memories to load"
},
"coreMemory.minImportance": {
"label": "Min Core Importance",
"placeholder": "0.5",
"advanced": true,
"help": "Minimum importance threshold for core memories (0-1)"
}
},
"configSchema": {
@@ -60,10 +70,20 @@
"autoRecall": {
"type": "boolean"
},
"captureMaxChars": {
"type": "number",
"minimum": 100,
"maximum": 10000
"coreMemory": {
"type": "object",
"additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
},
"maxEntries": {
"type": "number"
},
"minImportance": {
"type": "number"
}
}
}
},
"required": ["embedding"]

View File

@@ -0,0 +1,209 @@
/**
* Configuration schema for memory-neo4j plugin.
*
* Matches the JSON Schema in openclaw.plugin.json.
* Provides runtime parsing with env var resolution and defaults.
*/
// Which backend generates embedding vectors.
export type EmbeddingProvider = "openai" | "ollama";

// Fully-resolved plugin configuration as returned by
// memoryNeo4jConfigSchema.parse (env vars expanded, defaults applied).
// NOTE(review): openclaw.plugin.json also declares coreMemory.minImportance,
// which is not represented here — confirm whether it should be parsed.
export type MemoryNeo4jConfig = {
  neo4j: {
    uri: string;
    username: string; // defaults to "neo4j" when absent
    password: string;
  };
  embedding: {
    provider: EmbeddingProvider;
    apiKey?: string; // required for "openai", optional for "ollama"
    model: string;
    baseUrl?: string;
  };
  autoCapture: boolean;
  autoRecall: boolean;
  coreMemory: {
    enabled: boolean;
    maxEntries: number;
  };
};

/**
 * Extraction configuration resolved from environment variables.
 * Entity extraction auto-enables when OPENROUTER_API_KEY is set.
 */
export type ExtractionConfig = {
  enabled: boolean;
  apiKey: string;
  model: string;
  baseUrl: string;
  temperature: number;
  maxRetries: number;
};

// All memory categories the plugin recognizes ("core" memories are
// injected at session start; the rest are recall-only).
export const MEMORY_CATEGORIES = [
  "core",
  "preference",
  "fact",
  "decision",
  "entity",
  "other",
] as const;

export type MemoryCategory = (typeof MEMORY_CATEGORIES)[number];

// Known embedding models mapped to their output vector dimension.
// Used by vectorDimsForModel for exact and prefix matching.
const EMBEDDING_DIMENSIONS: Record<string, number> = {
  // OpenAI models
  "text-embedding-3-small": 1536,
  "text-embedding-3-large": 3072,
  // Ollama models (common ones)
  "mxbai-embed-large": 1024,
  "mxbai-embed-large-2k:latest": 1024,
  "nomic-embed-text": 768,
  "all-minilm": 384,
};

// Default dimension for unknown models (Ollama models vary)
const DEFAULT_EMBEDDING_DIMS = 1024;
/**
 * Resolve the embedding vector dimension for a model name.
 *
 * Exact table matches win; otherwise a known model-name prefix matches
 * (covers versioned names like "mxbai-embed-large:latest"); anything
 * else falls back to DEFAULT_EMBEDDING_DIMS.
 */
export function vectorDimsForModel(model: string): number {
  const exact = EMBEDDING_DIMENSIONS[model];
  if (exact) {
    return exact;
  }
  const byPrefix = Object.entries(EMBEDDING_DIMENSIONS).find(([known]) =>
    model.startsWith(known),
  );
  return byPrefix ? byPrefix[1] : DEFAULT_EMBEDDING_DIMS;
}
/**
 * Expand ${ENV_VAR} placeholders in a config string using process.env.
 * Throws when a referenced variable is unset (or empty).
 */
function resolveEnvVars(value: string): string {
  const placeholder = /\$\{([^}]+)\}/g;
  return value.replace(placeholder, (_match, name) => {
    const resolved = process.env[name];
    if (!resolved) {
      throw new Error(`Environment variable ${name} is not set`);
    }
    return resolved;
  });
}
/**
 * Build extraction settings from environment variables.
 *
 * Extraction is enabled if and only if OPENROUTER_API_KEY is present;
 * model and base URL have env overrides with sensible defaults.
 */
export function resolveExtractionConfig(): ExtractionConfig {
  const apiKey = process.env.OPENROUTER_API_KEY ?? "";
  const model = process.env.EXTRACTION_MODEL ?? "google/gemini-2.0-flash-001";
  const baseUrl = process.env.EXTRACTION_BASE_URL ?? "https://openrouter.ai/api/v1";
  return {
    enabled: apiKey !== "",
    apiKey,
    model,
    baseUrl,
    temperature: 0.0,
    maxRetries: 2,
  };
}
/** Throw a descriptive error when `value` contains keys outside `allowed`. */
function assertAllowedKeys(value: Record<string, unknown>, allowed: string[], label: string) {
  const allowedSet = new Set(allowed);
  const unknown = Object.keys(value).filter((key) => !allowedSet.has(key));
  if (unknown.length > 0) {
    throw new Error(`${label} has unknown keys: ${unknown.join(", ")}`);
  }
}
/**
 * Config schema with parse method for runtime validation & transformation.
 * JSON Schema validation is handled by openclaw.plugin.json; this handles
 * env var resolution and defaults.
 *
 * Throws on: non-object input, unknown keys, missing neo4j section or
 * neo4j.uri, and a missing apiKey when the embedding provider is OpenAI.
 * NOTE(review): coreMemory.minImportance is declared in openclaw.plugin.json
 * but is silently dropped here — confirm whether it should be parsed.
 */
export const memoryNeo4jConfigSchema = {
  parse(value: unknown): MemoryNeo4jConfig {
    if (!value || typeof value !== "object" || Array.isArray(value)) {
      throw new Error("memory-neo4j config required");
    }
    const cfg = value as Record<string, unknown>;
    assertAllowedKeys(
      cfg,
      ["embedding", "neo4j", "autoCapture", "autoRecall", "coreMemory"],
      "memory-neo4j config",
    );
    // Parse neo4j section
    const neo4jRaw = cfg.neo4j as Record<string, unknown> | undefined;
    if (!neo4jRaw || typeof neo4jRaw !== "object") {
      throw new Error("neo4j config section is required");
    }
    assertAllowedKeys(neo4jRaw, ["uri", "user", "username", "password"], "neo4j config");
    if (typeof neo4jRaw.uri !== "string" || !neo4jRaw.uri) {
      throw new Error("neo4j.uri is required");
    }
    // Password may reference ${ENV_VAR}; a non-string password becomes "".
    const neo4jPassword =
      typeof neo4jRaw.password === "string" ? resolveEnvVars(neo4jRaw.password) : "";
    // Support both 'user' and 'username' for neo4j config
    const neo4jUsername =
      typeof neo4jRaw.user === "string"
        ? neo4jRaw.user
        : typeof neo4jRaw.username === "string"
          ? neo4jRaw.username
          : "neo4j";
    // Parse embedding section (optional for ollama without apiKey)
    const embeddingRaw = cfg.embedding as Record<string, unknown> | undefined;
    assertAllowedKeys(
      embeddingRaw ?? {},
      ["provider", "apiKey", "model", "baseUrl"],
      "embedding config",
    );
    // Anything other than an explicit "ollama" selects OpenAI.
    const provider: EmbeddingProvider = embeddingRaw?.provider === "ollama" ? "ollama" : "openai";
    // apiKey is required for openai, optional for ollama
    let apiKey: string | undefined;
    if (typeof embeddingRaw?.apiKey === "string" && embeddingRaw.apiKey) {
      apiKey = resolveEnvVars(embeddingRaw.apiKey);
    } else if (provider === "openai") {
      throw new Error("embedding.apiKey is required for OpenAI provider");
    }
    // Default model depends on provider.
    const embeddingModel =
      typeof embeddingRaw?.model === "string"
        ? embeddingRaw.model
        : provider === "ollama"
          ? "mxbai-embed-large"
          : "text-embedding-3-small";
    const baseUrl = typeof embeddingRaw?.baseUrl === "string" ? embeddingRaw.baseUrl : undefined;
    // Parse coreMemory section (optional with defaults)
    const coreMemoryRaw = cfg.coreMemory as Record<string, unknown> | undefined;
    const coreMemoryEnabled = coreMemoryRaw?.enabled !== false; // enabled by default
    const coreMemoryMaxEntries =
      typeof coreMemoryRaw?.maxEntries === "number" ? coreMemoryRaw.maxEntries : 50;
    return {
      neo4j: {
        uri: neo4jRaw.uri,
        username: neo4jUsername,
        password: neo4jPassword,
      },
      embedding: {
        provider,
        apiKey,
        model: embeddingModel,
        baseUrl,
      },
      // Booleans default to true; only an explicit `false` disables them.
      autoCapture: cfg.autoCapture !== false,
      autoRecall: cfg.autoRecall !== false,
      coreMemory: {
        enabled: coreMemoryEnabled,
        maxEntries: coreMemoryMaxEntries,
      },
    };
  },
};

View File

@@ -0,0 +1,104 @@
/**
* Embedding generation for memory-neo4j.
*
* Supports both OpenAI and Ollama providers.
*/
import OpenAI from "openai";
import type { EmbeddingProvider } from "./config.js";
/**
 * Embedding client supporting OpenAI and Ollama backends.
 *
 * - OpenAI: uses the official SDK; supports true batch requests.
 * - Ollama: talks to the local REST API (/api/embed), one text per request.
 */
export class Embeddings {
  private client: OpenAI | null = null;
  private readonly provider: EmbeddingProvider;
  private readonly baseUrl: string;

  /**
   * @param apiKey   Required for the "openai" provider; unused for "ollama".
   * @param model    Embedding model name (default: text-embedding-3-small).
   * @param provider "openai" (default) or "ollama".
   * @param baseUrl  Optional endpoint override. Defaults to the local
   *                 Ollama server for "ollama"; for "openai" it is passed
   *                 to the SDK (previously it was silently ignored even
   *                 though config.ts accepts embedding.baseUrl).
   * @throws when provider is "openai" and no apiKey is given.
   */
  constructor(
    private readonly apiKey: string | undefined,
    private readonly model: string = "text-embedding-3-small",
    provider: EmbeddingProvider = "openai",
    baseUrl?: string,
  ) {
    this.provider = provider;
    this.baseUrl = baseUrl ?? (provider === "ollama" ? "http://localhost:11434" : "");
    if (provider === "openai") {
      if (!apiKey) {
        throw new Error("API key required for OpenAI embeddings");
      }
      // Fix: honor a custom endpoint (proxy / OpenAI-compatible gateway)
      // when configured; `undefined` keeps the SDK default.
      this.client = new OpenAI({ apiKey, baseURL: baseUrl || undefined });
    }
  }

  /**
   * Generate an embedding vector for a single text.
   */
  async embed(text: string): Promise<number[]> {
    if (this.provider === "ollama") {
      return this.embedOllama(text);
    }
    return this.embedOpenAI(text);
  }

  /**
   * Generate embeddings for multiple texts.
   * Returns array of embeddings in the same order as input.
   */
  async embedBatch(texts: string[]): Promise<number[][]> {
    if (texts.length === 0) {
      return [];
    }
    if (this.provider === "ollama") {
      // Ollama has no batch endpoint here; issue one request per text
      // (Promise.all runs them concurrently, not sequentially).
      return Promise.all(texts.map((t) => this.embedOllama(t)));
    }
    return this.embedBatchOpenAI(texts);
  }

  // Single-text embedding via the OpenAI SDK.
  private async embedOpenAI(text: string): Promise<number[]> {
    if (!this.client) {
      throw new Error("OpenAI client not initialized");
    }
    const response = await this.client.embeddings.create({
      model: this.model,
      input: text,
    });
    return response.data[0].embedding;
  }

  // Batch embedding via the OpenAI SDK; preserves input order.
  private async embedBatchOpenAI(texts: string[]): Promise<number[][]> {
    if (!this.client) {
      throw new Error("OpenAI client not initialized");
    }
    const response = await this.client.embeddings.create({
      model: this.model,
      input: texts,
    });
    // Sort by index to ensure correct order
    return response.data.toSorted((a, b) => a.index - b.index).map((d) => d.embedding);
  }

  // Single-text embedding via Ollama's REST API (/api/embed).
  private async embedOllama(text: string): Promise<number[]> {
    const url = `${this.baseUrl}/api/embed`;
    const response = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model: this.model,
        input: text,
      }),
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama embedding failed: ${response.status} ${error}`);
    }
    const data = (await response.json()) as { embeddings?: number[][] };
    if (!data.embeddings?.[0]) {
      throw new Error("No embedding returned from Ollama");
    }
    return data.embeddings[0];
  }
}

View File

@@ -0,0 +1,774 @@
/**
* LLM-based entity extraction and auto-capture decision for memory-neo4j.
*
* Uses Gemini Flash via OpenRouter for:
* 1. Entity extraction: Extract entities and relationships from stored memories
* 2. Auto-capture decision: Decide what's worth remembering from conversations
*
* Both run as background fire-and-forget operations with graceful degradation.
*/
import { randomUUID } from "node:crypto";
import type { ExtractionConfig } from "./config.js";
import type { Embeddings } from "./embeddings.js";
import type { Neo4jMemoryClient } from "./neo4j-client.js";
import type { CaptureItem, EntityType, ExtractionResult, MemoryCategory } from "./schema.js";
import { ALLOWED_RELATIONSHIP_TYPES, ENTITY_TYPES } from "./schema.js";
// ============================================================================
// Types
// ============================================================================
// Minimal logging surface the host plugin API provides.
// `debug` is optional — call sites guard it with `logger.debug?.(...)`.
type Logger = {
  info: (msg: string) => void;
  warn: (msg: string) => void;
  error: (msg: string) => void;
  debug?: (msg: string) => void;
};
// ============================================================================
// Extraction Prompt
// ============================================================================
// Prompt template for entity extraction. "{text}" is substituted with the
// stored memory text before the call; the model must return strict JSON
// matching the shape validated by validateExtractionResult.
const ENTITY_EXTRACTION_PROMPT = `You are an entity extraction system for a personal memory store.
Extract entities and relationships from this memory text.
Memory: "{text}"
Return JSON:
{
"entities": [
{"name": "tarun", "type": "person", "aliases": ["boss"], "description": "brief description"}
],
"relationships": [
{"source": "tarun", "target": "abundent", "type": "WORKS_AT", "confidence": 0.95}
],
"tags": [
{"name": "neo4j", "category": "technology"}
]
}
Rules:
- Normalize entity names to lowercase
- Entity types: person, organization, location, event, concept
- Relationship types: WORKS_AT, LIVES_AT, KNOWS, MARRIED_TO, PREFERS, DECIDED, RELATED_TO
- Confidence: 0.0-1.0
- Only extract what's explicitly stated or strongly implied
- Return empty arrays if nothing to extract
- Keep entity descriptions brief (1 sentence max)`;
// ============================================================================
// Auto-Capture Decision Prompt
// ============================================================================
// Prompt template for the auto-capture decision. "{messages}" is replaced
// with the user's conversation messages; the model must return strict JSON
// matching the shape validated by validateCaptureDecision.
const AUTO_CAPTURE_PROMPT = `You are an AI memory curator. Given these user messages from a conversation, identify information worth storing as long-term memories.
Only extract:
- Personal preferences and opinions ("I prefer dark mode", "I like TypeScript")
- Important facts about people, places, organizations
- Decisions made ("We decided to use Neo4j", "Going with plan A")
- Contact information (emails, phone numbers, usernames)
- Important events or dates
- Technical decisions and configurations
Do NOT extract:
- General questions or instructions to the AI
- Routine greetings or acknowledgments
- Information that is too vague or contextual
- Information already in system prompts or documentation
Categories:
- "core": Foundational identity info that should ALWAYS be remembered (user's name, role, company, key relationships, critical preferences that define who they are). Use sparingly - only for truly foundational facts.
- "preference": User preferences and opinions
- "fact": Facts about people, places, things
- "decision": Decisions made
- "entity": Entity-focused memories
- "other": Miscellaneous
Messages:
"""
{messages}
"""
Return JSON:
{
"memories": [
{"text": "concise memory text", "category": "core|preference|fact|decision|entity|other", "importance": 0.7}
]
}
If nothing is worth remembering, return: {"memories": []}`;
// ============================================================================
// OpenRouter API Client
// ============================================================================
/**
 * POST a single-prompt chat completion to OpenRouter and return the
 * assistant message content (null when the response has none).
 *
 * Retries failed calls with exponential backoff (500ms * 2^attempt) up to
 * config.maxRetries; the final failure is rethrown to the caller.
 */
async function callOpenRouter(config: ExtractionConfig, prompt: string): Promise<string | null> {
  let attempt = 0;
  while (attempt <= config.maxRetries) {
    try {
      const response = await fetch(`${config.baseUrl}/chat/completions`, {
        method: "POST",
        headers: {
          Authorization: `Bearer ${config.apiKey}`,
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          model: config.model,
          messages: [{ role: "user", content: prompt }],
          temperature: config.temperature,
          response_format: { type: "json_object" },
        }),
      });
      if (!response.ok) {
        const body = await response.text().catch(() => "");
        throw new Error(`OpenRouter API error ${response.status}: ${body}`);
      }
      const data = (await response.json()) as {
        choices?: Array<{ message?: { content?: string } }>;
      };
      return data.choices?.[0]?.message?.content ?? null;
    } catch (err) {
      if (attempt >= config.maxRetries) {
        throw err;
      }
      // Exponential backoff before the next attempt.
      await new Promise((resolve) => setTimeout(resolve, 500 * 2 ** attempt));
    }
    attempt += 1;
  }
  return null;
}
// ============================================================================
// Entity Extraction
// ============================================================================
/**
 * Ask the LLM to pull entities, relationships, and tags out of a memory
 * text. Returns null when extraction is disabled, the call yields no
 * content, or the response cannot be parsed — callers treat null as
 * "nothing extracted" rather than an error.
 */
export async function extractEntities(
  text: string,
  config: ExtractionConfig,
): Promise<ExtractionResult | null> {
  if (!config.enabled) {
    return null;
  }
  try {
    const content = await callOpenRouter(config, ENTITY_EXTRACTION_PROMPT.replace("{text}", text));
    if (!content) {
      return null;
    }
    const parsed = JSON.parse(content) as Record<string, unknown>;
    return validateExtractionResult(parsed);
  } catch {
    // Best-effort: network or parse failures degrade to "no extraction".
    return null;
  }
}
/**
 * Sanitize raw LLM JSON into a well-formed ExtractionResult.
 *
 * Normalizes names to trimmed lowercase, coerces unknown entity types to
 * "concept", drops relationships whose type is not in
 * ALLOWED_RELATIONSHIP_TYPES, clamps confidence to [0, 1] (default 0.7),
 * and discards malformed or empty-name records.
 */
function validateExtractionResult(raw: Record<string, unknown>): ExtractionResult {
  const isRecord = (v: unknown): v is Record<string, unknown> =>
    v !== null && typeof v === "object";
  const normalize = (s: unknown) => String(s).trim().toLowerCase();
  const validEntityTypes = new Set<string>(ENTITY_TYPES);

  const rawEntities = Array.isArray(raw.entities) ? raw.entities : [];
  const entities = rawEntities
    .filter(
      (e: unknown): e is Record<string, unknown> =>
        isRecord(e) && typeof e.name === "string" && typeof e.type === "string",
    )
    .map((e) => ({
      name: normalize(e.name),
      type: validEntityTypes.has(String(e.type)) ? (String(e.type) as EntityType) : "concept",
      aliases: Array.isArray(e.aliases)
        ? (e.aliases as unknown[])
            .filter((a): a is string => typeof a === "string")
            .map((a) => a.trim().toLowerCase())
        : undefined,
      description: typeof e.description === "string" ? e.description : undefined,
    }))
    .filter((e) => e.name.length > 0);

  const rawRelationships = Array.isArray(raw.relationships) ? raw.relationships : [];
  const relationships = rawRelationships
    .filter(
      (r: unknown): r is Record<string, unknown> =>
        isRecord(r) &&
        typeof r.source === "string" &&
        typeof r.target === "string" &&
        typeof r.type === "string" &&
        ALLOWED_RELATIONSHIP_TYPES.has(String(r.type)),
    )
    .map((r) => ({
      source: normalize(r.source),
      target: normalize(r.target),
      type: String(r.type),
      confidence: typeof r.confidence === "number" ? Math.min(1, Math.max(0, r.confidence)) : 0.7,
    }));

  const rawTags = Array.isArray(raw.tags) ? raw.tags : [];
  const tags = rawTags
    .filter((t: unknown): t is Record<string, unknown> => isRecord(t) && typeof t.name === "string")
    .map((t) => ({
      name: normalize(t.name),
      category: typeof t.category === "string" ? t.category : "topic",
    }))
    .filter((t) => t.name.length > 0);

  return { entities, relationships, tags };
}
// ============================================================================
// Background Extraction Pipeline
// ============================================================================
/**
 * Run entity extraction in the background for a stored memory.
 * Fire-and-forget: errors are logged but never propagated.
 *
 * Flow:
 * 1. Call LLM to extract entities and relationships
 * 2. MERGE Entity nodes (idempotent)
 * 3. Create MENTIONS relationships from Memory → Entity
 * 4. Create inter-Entity relationships (WORKS_AT, KNOWS, etc.)
 * 5. Tag the memory
 * 6. Update extractionStatus to "complete" or "failed"
 */
export async function runBackgroundExtraction(
  memoryId: string,
  text: string,
  db: Neo4jMemoryClient,
  embeddings: Embeddings,
  config: ExtractionConfig,
  logger: Logger,
): Promise<void> {
  if (!config.enabled) {
    await db.updateExtractionStatus(memoryId, "skipped").catch(() => {});
    return;
  }
  try {
    const result = await extractEntities(text, config);
    if (!result) {
      await db.updateExtractionStatus(memoryId, "failed");
      return;
    }
    // Empty extraction is valid — not all memories have extractable entities
    if (
      result.entities.length === 0 &&
      result.relationships.length === 0 &&
      result.tags.length === 0
    ) {
      await db.updateExtractionStatus(memoryId, "complete");
      return;
    }
    // Generate embeddings for entity names (for entity vector search)
    let entityEmbeddings: Map<string, number[]> | undefined;
    if (result.entities.length > 0) {
      try {
        const names = result.entities.map((e) => e.name);
        const vectors = await embeddings.embedBatch(names);
        entityEmbeddings = new Map(names.map((n, i) => [n, vectors[i]]));
      } catch (err) {
        logger.debug?.(`memory-neo4j: entity embedding generation failed: ${String(err)}`);
      }
    }
    // MERGE Entity nodes
    for (const entity of result.entities) {
      try {
        await db.mergeEntity({
          id: randomUUID(),
          name: entity.name,
          type: entity.type,
          aliases: entity.aliases,
          description: entity.description,
          embedding: entityEmbeddings?.get(entity.name),
        });
        // Create MENTIONS relationship
        await db.createMentions(memoryId, entity.name, "context", 1.0);
      } catch (err) {
        logger.warn(`memory-neo4j: entity merge failed for "${entity.name}": ${String(err)}`);
      }
    }
    // Create inter-Entity relationships
    for (const rel of result.relationships) {
      try {
        await db.createEntityRelationship(rel.source, rel.target, rel.type, rel.confidence);
      } catch (err) {
        logger.debug?.(
          `memory-neo4j: relationship creation failed: ${rel.source}->${rel.target}: ${String(err)}`,
        );
      }
    }
    // Tag the memory
    for (const tag of result.tags) {
      try {
        await db.tagMemory(memoryId, tag.name, tag.category);
      } catch (err) {
        logger.debug?.(`memory-neo4j: tagging failed for "${tag.name}": ${String(err)}`);
      }
    }
    await db.updateExtractionStatus(memoryId, "complete");
    // Fix: the two template fragments previously concatenated without a
    // separator, logging e.g. "complete for abcd12343 entities, ...".
    logger.info(
      `memory-neo4j: extraction complete for ${memoryId.slice(0, 8)}: ` +
        `${result.entities.length} entities, ${result.relationships.length} rels, ${result.tags.length} tags`,
    );
  } catch (err) {
    logger.warn(`memory-neo4j: extraction failed for ${memoryId.slice(0, 8)}: ${String(err)}`);
    await db.updateExtractionStatus(memoryId, "failed").catch(() => {});
  }
}
// ============================================================================
// Sleep Cycle - Five Phase Memory Consolidation
// ============================================================================
/**
 * Sleep Cycle Result - aggregated stats from all five phases.
 * Each phase reports its own counters; `durationMs`/`aborted` cover the
 * whole run.
 */
export type SleepCycleResult = {
  // Phase 1: Deduplication
  dedup: {
    clustersFound: number;
    memoriesMerged: number;
  };
  // Phase 2: Core Promotion
  promotion: {
    candidatesFound: number;
    promoted: number;
  };
  // Phase 3: Decay & Pruning
  decay: {
    memoriesPruned: number;
  };
  // Phase 4: Entity Extraction
  extraction: {
    total: number;
    processed: number;
    succeeded: number;
    failed: number;
  };
  // Phase 5: Orphan Cleanup
  cleanup: {
    entitiesRemoved: number;
    tagsRemoved: number;
  };
  // Overall
  durationMs: number;
  aborted: boolean;
};

// Tuning knobs for runSleepCycle; every field is optional and falls back
// to the defaults noted below.
export type SleepCycleOptions = {
  // Common
  agentId?: string;
  abortSignal?: AbortSignal;
  // Phase 1: Deduplication
  dedupThreshold?: number; // Vector similarity threshold (default: 0.95)
  // Phase 2: Core Promotion
  promotionImportanceThreshold?: number; // Min importance to auto-promote (default: 0.9)
  promotionMinAgeDays?: number; // Min age before promotion (default: 7)
  // Phase 3: Decay
  decayRetentionThreshold?: number; // Below this, memory is pruned (default: 0.1)
  decayBaseHalfLifeDays?: number; // Base half-life in days (default: 30)
  decayImportanceMultiplier?: number; // How much importance extends half-life (default: 2)
  // Phase 4: Extraction
  extractionBatchSize?: number; // Memories per batch (default: 50)
  extractionDelayMs?: number; // Delay between batches (default: 1000)
  // Progress callback
  onPhaseStart?: (phase: "dedup" | "promotion" | "decay" | "extraction" | "cleanup") => void;
  onProgress?: (phase: string, message: string) => void;
};
/**
 * Run the full sleep cycle - five phases of memory consolidation.
 *
 * This mimics how human memory consolidation works during sleep:
 * 1. DEDUPLICATION - Merge near-duplicate memories (reduce redundancy)
 * 2. CORE PROMOTION - Promote high-importance memories to core status
 * 3. DECAY/PRUNING - Remove old, low-importance memories (forgetting curve)
 * 4. EXTRACTION - Form entity relationships (strengthen connections)
 * 5. CLEANUP - Remove orphaned entities/tags (garbage collection)
 *
 * Benefits:
 * - Reduces latency during active conversations
 * - Prevents memory bloat and "self-degradation"
 * - Cleaner separation between capture and consolidation
 *
 * Research basis:
 * - Ebbinghaus forgetting curve for decay
 * - FadeMem importance-weighted retention
 * - Graphiti/Zep edge deduplication patterns
 *
 * Each phase is individually guarded: an error in one phase is logged and
 * the cycle continues with the next. `abortSignal` is checked before each
 * phase and between per-item steps; an abort stops work but still returns
 * the partial result with `aborted: true`.
 */
export async function runSleepCycle(
  db: Neo4jMemoryClient,
  embeddings: Embeddings,
  config: ExtractionConfig,
  logger: Logger,
  options: SleepCycleOptions = {},
): Promise<SleepCycleResult> {
  const startTime = Date.now();
  const {
    agentId,
    abortSignal,
    dedupThreshold = 0.95,
    promotionImportanceThreshold = 0.9,
    promotionMinAgeDays = 7,
    decayRetentionThreshold = 0.1,
    decayBaseHalfLifeDays = 30,
    decayImportanceMultiplier = 2,
    extractionBatchSize = 50,
    extractionDelayMs = 1000,
    onPhaseStart,
    onProgress,
  } = options;
  const result: SleepCycleResult = {
    dedup: { clustersFound: 0, memoriesMerged: 0 },
    promotion: { candidatesFound: 0, promoted: 0 },
    decay: { memoriesPruned: 0 },
    extraction: { total: 0, processed: 0, succeeded: 0, failed: 0 },
    cleanup: { entitiesRemoved: 0, tagsRemoved: 0 },
    durationMs: 0,
    aborted: false,
  };
  // --------------------------------------------------------------------------
  // Phase 1: Deduplication
  // --------------------------------------------------------------------------
  if (!abortSignal?.aborted) {
    onPhaseStart?.("dedup");
    logger.info("memory-neo4j: [sleep] Phase 1: Deduplication");
    try {
      const clusters = await db.findDuplicateClusters(dedupThreshold, agentId);
      result.dedup.clustersFound = clusters.length;
      for (const cluster of clusters) {
        if (abortSignal?.aborted) {
          break;
        }
        const { deletedCount } = await db.mergeMemoryCluster(
          cluster.memoryIds,
          cluster.importances,
        );
        result.dedup.memoriesMerged += deletedCount;
        onProgress?.("dedup", `Merged cluster of ${cluster.memoryIds.length} → 1`);
      }
      logger.info(
        `memory-neo4j: [sleep] Phase 1 complete — ${result.dedup.clustersFound} clusters, ${result.dedup.memoriesMerged} merged`,
      );
    } catch (err) {
      logger.warn(`memory-neo4j: [sleep] Phase 1 error: ${String(err)}`);
    }
  }
  // --------------------------------------------------------------------------
  // Phase 2: Core Promotion
  // --------------------------------------------------------------------------
  if (!abortSignal?.aborted) {
    onPhaseStart?.("promotion");
    logger.info("memory-neo4j: [sleep] Phase 2: Core Promotion");
    try {
      const candidates = await db.findPromotionCandidates({
        importanceThreshold: promotionImportanceThreshold,
        minAgeDays: promotionMinAgeDays,
        agentId,
      });
      result.promotion.candidatesFound = candidates.length;
      if (candidates.length > 0) {
        const ids = candidates.map((m) => m.id);
        result.promotion.promoted = await db.promoteToCore(ids);
        for (const c of candidates) {
          onProgress?.("promotion", `Promoted "${c.text.slice(0, 50)}..." to core`);
        }
      }
      logger.info(
        `memory-neo4j: [sleep] Phase 2 complete — ${result.promotion.promoted} memories promoted to core`,
      );
    } catch (err) {
      logger.warn(`memory-neo4j: [sleep] Phase 2 error: ${String(err)}`);
    }
  }
  // --------------------------------------------------------------------------
  // Phase 3: Decay & Pruning
  // --------------------------------------------------------------------------
  if (!abortSignal?.aborted) {
    onPhaseStart?.("decay");
    logger.info("memory-neo4j: [sleep] Phase 3: Decay & Pruning");
    try {
      const decayed = await db.findDecayedMemories({
        retentionThreshold: decayRetentionThreshold,
        baseHalfLifeDays: decayBaseHalfLifeDays,
        importanceMultiplier: decayImportanceMultiplier,
        agentId,
      });
      if (decayed.length > 0) {
        const ids = decayed.map((m) => m.id);
        result.decay.memoriesPruned = await db.pruneMemories(ids);
        onProgress?.("decay", `Pruned ${result.decay.memoriesPruned} decayed memories`);
      }
      logger.info(
        `memory-neo4j: [sleep] Phase 3 complete — ${result.decay.memoriesPruned} memories pruned`,
      );
    } catch (err) {
      logger.warn(`memory-neo4j: [sleep] Phase 3 error: ${String(err)}`);
    }
  }
  // --------------------------------------------------------------------------
  // Phase 4: Entity Extraction
  // --------------------------------------------------------------------------
  if (!abortSignal?.aborted && config.enabled) {
    onPhaseStart?.("extraction");
    logger.info("memory-neo4j: [sleep] Phase 4: Entity Extraction");
    try {
      // Get initial count
      const counts = await db.countByExtractionStatus(agentId);
      result.extraction.total = counts.pending;
      if (result.extraction.total > 0) {
        // Drain pending extractions batch by batch until none remain.
        let hasMore = true;
        while (hasMore && !abortSignal?.aborted) {
          const pending = await db.listPendingExtractions(extractionBatchSize, agentId);
          if (pending.length === 0) {
            hasMore = false;
            break;
          }
          for (const memory of pending) {
            if (abortSignal?.aborted) {
              break;
            }
            try {
              await runBackgroundExtraction(memory.id, memory.text, db, embeddings, config, logger);
              // NOTE(review): runBackgroundExtraction catches its own errors
              // and marks the memory "failed" internally, so this outer catch
              // rarely fires and `succeeded` may overcount — confirm against
              // the extractionStatus counts if exact numbers matter.
              result.extraction.succeeded++;
            } catch (err) {
              logger.warn(
                `memory-neo4j: extraction failed for ${memory.id.slice(0, 8)}: ${String(err)}`,
              );
              result.extraction.failed++;
            }
            result.extraction.processed++;
            if (result.extraction.processed % 10 === 0) {
              onProgress?.(
                "extraction",
                `${result.extraction.processed}/${result.extraction.total} processed`,
              );
            }
          }
          // Delay between batches
          if (hasMore && !abortSignal?.aborted) {
            await new Promise((resolve) => setTimeout(resolve, extractionDelayMs));
          }
        }
      }
      logger.info(
        `memory-neo4j: [sleep] Phase 4 complete — ${result.extraction.succeeded} extracted, ${result.extraction.failed} failed`,
      );
    } catch (err) {
      logger.warn(`memory-neo4j: [sleep] Phase 4 error: ${String(err)}`);
    }
  } else if (!config.enabled) {
    logger.info("memory-neo4j: [sleep] Phase 4 skipped — extraction not enabled");
  }
  // --------------------------------------------------------------------------
  // Phase 5: Orphan Cleanup
  // --------------------------------------------------------------------------
  if (!abortSignal?.aborted) {
    onPhaseStart?.("cleanup");
    logger.info("memory-neo4j: [sleep] Phase 5: Orphan Cleanup");
    try {
      // Clean up orphan entities
      const orphanEntities = await db.findOrphanEntities();
      if (orphanEntities.length > 0) {
        result.cleanup.entitiesRemoved = await db.deleteOrphanEntities(
          orphanEntities.map((e) => e.id),
        );
        onProgress?.("cleanup", `Removed ${result.cleanup.entitiesRemoved} orphan entities`);
      }
      // Clean up orphan tags
      const orphanTags = await db.findOrphanTags();
      if (orphanTags.length > 0) {
        result.cleanup.tagsRemoved = await db.deleteOrphanTags(orphanTags.map((t) => t.id));
        onProgress?.("cleanup", `Removed ${result.cleanup.tagsRemoved} orphan tags`);
      }
      logger.info(
        `memory-neo4j: [sleep] Phase 5 complete — ${result.cleanup.entitiesRemoved} entities, ${result.cleanup.tagsRemoved} tags removed`,
      );
    } catch (err) {
      logger.warn(`memory-neo4j: [sleep] Phase 5 error: ${String(err)}`);
    }
  }
  result.durationMs = Date.now() - startTime;
  result.aborted = abortSignal?.aborted ?? false;
  logger.info(
    `memory-neo4j: [sleep] Sleep cycle complete in ${(result.durationMs / 1000).toFixed(1)}s` +
      (result.aborted ? " (aborted)" : ""),
  );
  return result;
}
// ============================================================================
// Auto-Capture Decision
// ============================================================================
/**
 * Evaluate user messages and decide what's worth storing as long-term
 * memory. Returns a list of memory items to store; an empty array means
 * capture is disabled, input is trivial, or the LLM found nothing.
 */
export async function evaluateAutoCapture(
  userMessages: string[],
  config: ExtractionConfig,
): Promise<CaptureItem[]> {
  if (!config.enabled || userMessages.length === 0) {
    return [];
  }
  const combined = userMessages.join("\n\n");
  // Skip trivially short input — nothing worth remembering.
  if (combined.length < 10) {
    return [];
  }
  try {
    const content = await callOpenRouter(config, AUTO_CAPTURE_PROMPT.replace("{messages}", combined));
    if (!content) {
      return [];
    }
    const parsed = JSON.parse(content) as Record<string, unknown>;
    return validateCaptureDecision(parsed);
  } catch {
    // Auto-capture is best-effort; any failure means "capture nothing".
    return [];
  }
}
/**
 * Validate and sanitize the auto-capture LLM output.
 *
 * Accepts every category AUTO_CAPTURE_PROMPT instructs the model to return.
 * Fix: "core" was missing from the allow-list, so foundational memories
 * the prompt explicitly solicits were silently downgraded to "other"
 * (the list now matches MEMORY_CATEGORIES in config.ts). Unknown
 * categories still fall back to "other". Each memory text is capped at
 * 2000 chars and the batch at 5 items.
 */
function validateCaptureDecision(raw: Record<string, unknown>): CaptureItem[] {
  const memories = Array.isArray(raw.memories) ? raw.memories : [];
  const validCategories = new Set<string>([
    "core",
    "preference",
    "fact",
    "decision",
    "entity",
    "other",
  ]);
  return memories
    .filter(
      (m: unknown): m is Record<string, unknown> =>
        m !== null &&
        typeof m === "object" &&
        typeof (m as Record<string, unknown>).text === "string" &&
        (m as Record<string, unknown>).text !== "",
    )
    .map((m) => ({
      text: String(m.text).slice(0, 2000), // cap length
      category: validCategories.has(String(m.category))
        ? (String(m.category) as MemoryCategory)
        : "other",
      importance: typeof m.importance === "number" ? Math.min(1, Math.max(0, m.importance)) : 0.7,
    }))
    .slice(0, 5); // Max 5 captures per conversation
}
// ============================================================================
// Message Extraction Helper
// ============================================================================
/**
 * Pull plain-text user message contents out of an event.messages array.
 * Handles both string content and arrays of content blocks, then drops
 * noise: very short texts and previously injected context
 * (<relevant-memories>/<system> markers).
 */
export function extractUserMessages(messages: unknown[]): string[] {
  const collected: string[] = [];
  const textOfBlock = (block: unknown): string | null => {
    if (!block || typeof block !== "object") {
      return null;
    }
    const b = block as Record<string, unknown>;
    return b.type === "text" && typeof b.text === "string" ? b.text : null;
  };
  for (const raw of messages) {
    if (!raw || typeof raw !== "object") {
      continue;
    }
    const message = raw as Record<string, unknown>;
    // Only user messages feed auto-capture.
    if (message.role !== "user") {
      continue;
    }
    const content = message.content;
    if (typeof content === "string") {
      collected.push(content);
    } else if (Array.isArray(content)) {
      for (const block of content) {
        const text = textOfBlock(block);
        if (text !== null) {
          collected.push(text);
        }
      }
    }
  }
  // Filter out noise
  return collected.filter(
    (t) => t.length >= 10 && !t.includes("<relevant-memories>") && !t.includes("<system>"),
  );
}

View File

@@ -0,0 +1,942 @@
/**
* OpenClaw Memory (Neo4j) Plugin
*
* Drop-in replacement for memory-lancedb with three-signal hybrid search,
* entity extraction, and knowledge graph capabilities.
*
* Provides:
* - memory_recall: Hybrid search (vector + BM25 + graph traversal)
* - memory_store: Store memories with background entity extraction
* - memory_forget: Delete memories with cascade cleanup
*
* Architecture decisions: see docs/memory-neo4j/ARCHITECTURE.md
*/
import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
import { Type } from "@sinclair/typebox";
import { randomUUID } from "node:crypto";
import { stringEnum } from "openclaw/plugin-sdk";
import type { MemoryCategory, MemorySource } from "./schema.js";
import {
MEMORY_CATEGORIES,
memoryNeo4jConfigSchema,
resolveExtractionConfig,
vectorDimsForModel,
} from "./config.js";
import { Embeddings } from "./embeddings.js";
import { evaluateAutoCapture, extractUserMessages, runSleepCycle } from "./extractor.js";
import { Neo4jMemoryClient } from "./neo4j-client.js";
import { hybridSearch } from "./search.js";
// ============================================================================
// Plugin Definition
// ============================================================================
/**
 * Plugin object implementing the OpenClaw "memory" extension point.
 *
 * `register` wires up:
 *  - three agent tools (memory_recall / memory_store / memory_forget),
 *  - a `memory neo4j` CLI command group (list, search, stats, sleep, promote),
 *  - lifecycle hooks (auto-recall injection, core-memory bootstrap,
 *    post-conversation auto-capture),
 *  - a background service owning the Neo4j connection lifecycle.
 */
const memoryNeo4jPlugin = {
  id: "memory-neo4j",
  name: "Memory (Neo4j)",
  description:
    "Neo4j-backed long-term memory with three-signal hybrid search, entity extraction, and knowledge graph",
  kind: "memory" as const,
  configSchema: memoryNeo4jConfigSchema,
  register(api: OpenClawPluginApi) {
    // Parse configuration
    const cfg = memoryNeo4jConfigSchema.parse(api.pluginConfig);
    const extractionConfig = resolveExtractionConfig();
    // Vector index dimension must match the configured embedding model's output size.
    const vectorDim = vectorDimsForModel(cfg.embedding.model);
    // Create shared resources. The Neo4j connection itself is established in the
    // service start hook (ensureInitialized), not here.
    const db = new Neo4jMemoryClient(
      cfg.neo4j.uri,
      cfg.neo4j.username,
      cfg.neo4j.password,
      vectorDim,
      api.logger,
    );
    const embeddings = new Embeddings(
      cfg.embedding.apiKey,
      cfg.embedding.model,
      cfg.embedding.provider,
      cfg.embedding.baseUrl,
    );
    api.logger.debug?.(
      `memory-neo4j: registered (uri: ${cfg.neo4j.uri}, provider: ${cfg.embedding.provider}, model: ${cfg.embedding.model}, ` +
        `extraction: ${extractionConfig.enabled ? extractionConfig.model : "disabled"})`,
    );
    // ========================================================================
    // Tools (using factory pattern for agentId)
    // ========================================================================
    // memory_recall — Three-signal hybrid search
    api.registerTool(
      (ctx) => {
        const agentId = ctx.agentId || "default";
        return {
          name: "memory_recall",
          label: "Memory Recall",
          description:
            "Search through long-term memories. Use when you need context about user preferences, past decisions, or previously discussed topics.",
          parameters: Type.Object({
            query: Type.String({ description: "Search query" }),
            limit: Type.Optional(Type.Number({ description: "Max results (default: 5)" })),
          }),
          async execute(_toolCallId: string, params: unknown) {
            const { query, limit = 5 } = params as {
              query: string;
              limit?: number;
            };
            const results = await hybridSearch(
              db,
              embeddings,
              query,
              limit,
              agentId,
              extractionConfig.enabled,
            );
            if (results.length === 0) {
              return {
                content: [{ type: "text", text: "No relevant memories found." }],
                details: { count: 0 },
              };
            }
            const text = results
              .map((r, i) => `${i + 1}. [${r.category}] ${r.text} (${(r.score * 100).toFixed(0)}%)`)
              .join("\n");
            // Only expose a safe subset of fields (no embeddings/internal state).
            const sanitizedResults = results.map((r) => ({
              id: r.id,
              text: r.text,
              category: r.category,
              importance: r.importance,
              score: r.score,
            }));
            return {
              content: [
                {
                  type: "text",
                  text: `Found ${results.length} memories:\n\n${text}`,
                },
              ],
              details: { count: results.length, memories: sanitizedResults },
            };
          },
        };
      },
      { name: "memory_recall" },
    );
    // memory_store — Store with background entity extraction
    api.registerTool(
      (ctx) => {
        const agentId = ctx.agentId || "default";
        const sessionKey = ctx.sessionKey;
        return {
          name: "memory_store",
          label: "Memory Store",
          description:
            "Save important information in long-term memory. Use for preferences, facts, decisions.",
          parameters: Type.Object({
            text: Type.String({ description: "Information to remember" }),
            importance: Type.Optional(
              Type.Number({
                description: "Importance 0-1 (default: 0.7)",
              }),
            ),
            category: Type.Optional(stringEnum(MEMORY_CATEGORIES)),
          }),
          async execute(_toolCallId: string, params: unknown) {
            const {
              text,
              importance = 0.7,
              category = "other",
            } = params as {
              text: string;
              importance?: number;
              category?: MemoryCategory;
            };
            // 1. Generate embedding
            const vector = await embeddings.embed(text);
            // 2. Check for duplicates (vector similarity > 0.95)
            const existing = await db.findSimilar(vector, 0.95, 1);
            if (existing.length > 0) {
              return {
                content: [
                  {
                    type: "text",
                    text: `Similar memory already exists: "${existing[0].text}"`,
                  },
                ],
                details: {
                  action: "duplicate",
                  existingId: existing[0].id,
                  existingText: existing[0].text,
                },
              };
            }
            // 3. Store memory immediately (fast path)
            const memoryId = randomUUID();
            await db.storeMemory({
              id: memoryId,
              text,
              embedding: vector,
              importance: Math.min(1, Math.max(0, importance)),
              category,
              source: "user" as MemorySource,
              extractionStatus: extractionConfig.enabled ? "pending" : "skipped",
              agentId,
              sessionKey,
            });
            // 4. Extraction is deferred to sleep cycle (like human memory consolidation)
            // See: runSleepCycleExtraction() and `openclaw memory sleep` command
            return {
              content: [
                {
                  type: "text",
                  text: `Stored: "${text.slice(0, 100)}${text.length > 100 ? "..." : ""}"`,
                },
              ],
              details: { action: "created", id: memoryId },
            };
          },
        };
      },
      { name: "memory_store" },
    );
    // memory_forget — Delete with cascade
    api.registerTool(
      (_ctx) => {
        return {
          name: "memory_forget",
          label: "Memory Forget",
          description: "Delete specific memories. GDPR-compliant.",
          parameters: Type.Object({
            query: Type.Optional(Type.String({ description: "Search to find memory" })),
            memoryId: Type.Optional(Type.String({ description: "Specific memory ID" })),
          }),
          async execute(_toolCallId: string, params: unknown) {
            const { query, memoryId } = params as {
              query?: string;
              memoryId?: string;
            };
            // Direct delete by ID
            if (memoryId) {
              const deleted = await db.deleteMemory(memoryId);
              if (!deleted) {
                return {
                  content: [
                    {
                      type: "text",
                      text: `Memory ${memoryId} not found.`,
                    },
                  ],
                  details: { action: "not_found", id: memoryId },
                };
              }
              return {
                content: [
                  {
                    type: "text",
                    text: `Memory ${memoryId} forgotten.`,
                  },
                ],
                details: { action: "deleted", id: memoryId },
              };
            }
            // Search-based delete
            if (query) {
              const vector = await embeddings.embed(query);
              const results = await db.vectorSearch(vector, 5, 0.7);
              if (results.length === 0) {
                return {
                  content: [{ type: "text", text: "No matching memories found." }],
                  details: { found: 0 },
                };
              }
              // Auto-delete if single high-confidence match
              if (results.length === 1 && results[0].score > 0.9) {
                await db.deleteMemory(results[0].id);
                return {
                  content: [
                    {
                      type: "text",
                      text: `Forgotten: "${results[0].text}"`,
                    },
                  ],
                  details: { action: "deleted", id: results[0].id },
                };
              }
              // Multiple candidates — ask user to specify
              const list = results.map((r) => `- [${r.id}] ${r.text.slice(0, 60)}...`).join("\n");
              const sanitizedCandidates = results.map((r) => ({
                id: r.id,
                text: r.text,
                category: r.category,
                score: r.score,
              }));
              return {
                content: [
                  {
                    type: "text",
                    text: `Found ${results.length} candidates. Specify memoryId:\n${list}`,
                  },
                ],
                details: {
                  action: "candidates",
                  candidates: sanitizedCandidates,
                },
              };
            }
            // Neither parameter supplied — nothing to act on.
            return {
              content: [{ type: "text", text: "Provide query or memoryId." }],
              details: { error: "missing_param" },
            };
          },
        };
      },
      { name: "memory_forget" },
    );
    // ========================================================================
    // CLI Commands
    // ========================================================================
    api.registerCli(
      ({ program }) => {
        // Find existing memory command or create fallback
        let memoryCmd = program.commands.find((cmd) => cmd.name() === "memory");
        if (!memoryCmd) {
          // Fallback if core memory CLI not registered yet
          memoryCmd = program.command("memory").description("Memory commands");
        }
        // Add neo4j memory subcommand group
        const memory = memoryCmd.command("neo4j").description("Neo4j graph memory commands");
        memory
          .command("list")
          .description("List memory counts by agent and category")
          .option("--json", "Output as JSON")
          .action(async (opts: { json?: boolean }) => {
            try {
              await db.ensureInitialized();
              const stats = await db.getMemoryStats();
              if (opts.json) {
                console.log(JSON.stringify(stats, null, 2));
                return;
              }
              if (stats.length === 0) {
                console.log("No memories stored.");
                return;
              }
              // Group by agentId
              const byAgent = new Map<
                string,
                Array<{ category: string; count: number; avgImportance: number }>
              >();
              for (const row of stats) {
                const list = byAgent.get(row.agentId) || [];
                list.push({
                  category: row.category,
                  count: row.count,
                  avgImportance: row.avgImportance,
                });
                byAgent.set(row.agentId, list);
              }
              // Print table for each agent
              for (const [agentId, categories] of byAgent) {
                const total = categories.reduce((sum, c) => sum + c.count, 0);
                console.log(`\n┌─ ${agentId} (${total} total)`);
                console.log("│");
                console.log("│ Category Count Avg Importance");
                console.log("│ ─────────────────────────────────────");
                for (const { category, count, avgImportance } of categories) {
                  // Fixed-width columns for alignment.
                  const cat = category.padEnd(12);
                  const cnt = String(count).padStart(5);
                  const imp = (avgImportance * 100).toFixed(0).padStart(3) + "%";
                  console.log(`${cat} ${cnt} ${imp}`);
                }
                console.log("└");
              }
              console.log("");
            } catch (err) {
              console.error(`Error: ${err instanceof Error ? err.message : String(err)}`);
              process.exitCode = 1;
            }
          });
        memory
          .command("search")
          .description("Search memories")
          .argument("<query>", "Search query")
          .option("--limit <n>", "Max results", "5")
          .action(async (query: string, opts: { limit: string }) => {
            try {
              const results = await hybridSearch(
                db,
                embeddings,
                query,
                parseInt(opts.limit, 10),
                "default",
                extractionConfig.enabled,
              );
              // JSON output only (intended for scripting/inspection).
              const output = results.map((r) => ({
                id: r.id,
                text: r.text,
                category: r.category,
                importance: r.importance,
                score: r.score,
              }));
              console.log(JSON.stringify(output, null, 2));
            } catch (err) {
              console.error(`Error: ${err instanceof Error ? err.message : String(err)}`);
              process.exitCode = 1;
            }
          });
        memory
          .command("stats")
          .description("Show memory statistics and configuration")
          .action(async () => {
            try {
              await db.ensureInitialized();
              const stats = await db.getMemoryStats();
              const total = stats.reduce((sum, s) => sum + s.count, 0);
              console.log("\nMemory (Neo4j) Statistics");
              console.log("─────────────────────────");
              console.log(`Total memories: ${total}`);
              console.log(`Neo4j URI: ${cfg.neo4j.uri}`);
              console.log(`Embedding: ${cfg.embedding.provider}/${cfg.embedding.model}`);
              console.log(
                `Extraction: ${extractionConfig.enabled ? extractionConfig.model : "disabled"}`,
              );
              console.log(`Auto-capture: ${cfg.autoCapture ? "enabled" : "disabled"}`);
              console.log(`Auto-recall: ${cfg.autoRecall ? "enabled" : "disabled"}`);
              console.log(`Core memory: ${cfg.coreMemory.enabled ? "enabled" : "disabled"}`);
              if (stats.length > 0) {
                // Group by category across all agents
                const byCategory = new Map<string, number>();
                for (const row of stats) {
                  byCategory.set(row.category, (byCategory.get(row.category) ?? 0) + row.count);
                }
                console.log("\nBy Category:");
                for (const [category, count] of byCategory) {
                  console.log(` ${category.padEnd(12)} ${count}`);
                }
                // Show agent count
                const agents = new Set(stats.map((s) => s.agentId));
                console.log(`\nAgents: ${agents.size} (${[...agents].join(", ")})`);
              }
              console.log("");
            } catch (err) {
              console.error(`Error: ${err instanceof Error ? err.message : String(err)}`);
              process.exitCode = 1;
            }
          });
        memory
          .command("sleep")
          .description(
            "Run sleep cycle — consolidate memories (dedup → promote → decay → extract → cleanup)",
          )
          .option("--agent <id>", "Agent id (default: all agents)")
          .option("--dedup-threshold <n>", "Vector similarity threshold for dedup (default: 0.95)")
          .option(
            "--promotion-threshold <n>",
            "Min importance for auto-promotion to core (default: 0.9)",
          )
          .option("--promotion-min-age <days>", "Min age in days before promotion (default: 7)")
          .option("--decay-threshold <n>", "Decay score threshold for pruning (default: 0.1)")
          .option("--decay-half-life <days>", "Base half-life in days (default: 30)")
          .option("--batch-size <n>", "Extraction batch size (default: 50)")
          .option("--delay <ms>", "Delay between extraction batches in ms (default: 1000)")
          .action(
            async (opts: {
              agent?: string;
              dedupThreshold?: string;
              promotionThreshold?: string;
              promotionMinAge?: string;
              decayThreshold?: string;
              decayHalfLife?: string;
              batchSize?: string;
              delay?: string;
            }) => {
              console.log("\n🌙 Memory Sleep Cycle");
              console.log("═════════════════════════════════════════════════════════════");
              console.log("Five-phase memory consolidation (like human sleep):\n");
              console.log(" Phase 1: Deduplication — Merge near-duplicate memories");
              console.log(" Phase 2: Core Promotion — Promote high-importance to core");
              console.log(" Phase 3: Decay & Pruning — Remove stale low-importance memories");
              console.log(" Phase 4: Extraction — Form entity relationships");
              console.log(" Phase 5: Orphan Cleanup — Remove disconnected nodes\n");
              try {
                await db.ensureInitialized();
                // CLI flags are strings; unset flags pass `undefined` so
                // runSleepCycle applies its own defaults.
                const result = await runSleepCycle(db, embeddings, extractionConfig, api.logger, {
                  agentId: opts.agent,
                  dedupThreshold: opts.dedupThreshold ? parseFloat(opts.dedupThreshold) : undefined,
                  promotionImportanceThreshold: opts.promotionThreshold
                    ? parseFloat(opts.promotionThreshold)
                    : undefined,
                  promotionMinAgeDays: opts.promotionMinAge
                    ? parseInt(opts.promotionMinAge, 10)
                    : undefined,
                  decayRetentionThreshold: opts.decayThreshold
                    ? parseFloat(opts.decayThreshold)
                    : undefined,
                  decayBaseHalfLifeDays: opts.decayHalfLife
                    ? parseInt(opts.decayHalfLife, 10)
                    : undefined,
                  extractionBatchSize: opts.batchSize ? parseInt(opts.batchSize, 10) : undefined,
                  extractionDelayMs: opts.delay ? parseInt(opts.delay, 10) : undefined,
                  onPhaseStart: (phase) => {
                    const phaseNames = {
                      dedup: "Phase 1: Deduplication",
                      promotion: "Phase 2: Core Promotion",
                      decay: "Phase 3: Decay & Pruning",
                      extraction: "Phase 4: Extraction",
                      cleanup: "Phase 5: Orphan Cleanup",
                    };
                    console.log(`\n▶ ${phaseNames[phase]}`);
                    console.log("─────────────────────────────────────────────────────────────");
                  },
                  onProgress: (_phase, message) => {
                    console.log(` ${message}`);
                  },
                });
                console.log("\n═════════════════════════════════════════════════════════════");
                console.log(`✅ Sleep cycle complete in ${(result.durationMs / 1000).toFixed(1)}s`);
                console.log("─────────────────────────────────────────────────────────────");
                console.log(
                  ` Deduplication: ${result.dedup.clustersFound} clusters → ${result.dedup.memoriesMerged} merged`,
                );
                console.log(
                  ` Promotion: ${result.promotion.promoted}/${result.promotion.candidatesFound} promoted to core`,
                );
                console.log(` Decay/Pruning: ${result.decay.memoriesPruned} memories pruned`);
                console.log(
                  ` Extraction: ${result.extraction.succeeded}/${result.extraction.total} extracted` +
                    (result.extraction.failed > 0 ? ` (${result.extraction.failed} failed)` : ""),
                );
                console.log(
                  ` Cleanup: ${result.cleanup.entitiesRemoved} entities, ${result.cleanup.tagsRemoved} tags removed`,
                );
                if (result.aborted) {
                  console.log("\n⚠ Sleep cycle was aborted before completion.");
                }
                console.log("");
              } catch (err) {
                console.error(
                  `\n❌ Sleep cycle failed: ${err instanceof Error ? err.message : String(err)}`,
                );
                process.exitCode = 1;
              }
            },
          );
        memory
          .command("promote")
          .description("Manually promote a memory to core status")
          .argument("<id>", "Memory ID to promote")
          .action(async (id: string) => {
            try {
              await db.ensureInitialized();
              const promoted = await db.promoteToCore([id]);
              if (promoted > 0) {
                console.log(`✅ Memory ${id} promoted to core.`);
              } else {
                console.log(`❌ Memory ${id} not found.`);
                process.exitCode = 1;
              }
            } catch (err) {
              console.error(`Error: ${err instanceof Error ? err.message : String(err)}`);
              process.exitCode = 1;
            }
          });
      },
      { commands: [] }, // Adds subcommands to existing "memory" command, no conflict
    );
    // ========================================================================
    // Lifecycle Hooks
    // ========================================================================
    // Track sessions where core memories have already been loaded (skip on subsequent turns).
    // NOTE: This is in-memory and will be cleared on gateway restart. The agent_bootstrap
    // hook below also checks for existing conversation history to avoid re-injecting core
    // memories after restarts.
    const bootstrappedSessions = new Set<string>();
    // After compaction: clear bootstrap flag so core memories get re-injected
    if (cfg.coreMemory.enabled) {
      api.on("after_compaction", async (_event, ctx) => {
        if (ctx.sessionKey) {
          bootstrappedSessions.delete(ctx.sessionKey);
          api.logger.info?.(
            `memory-neo4j: cleared bootstrap flag for session ${ctx.sessionKey} after compaction`,
          );
        }
      });
    }
    // Auto-recall: inject relevant memories before agent starts
    api.logger.debug?.(`memory-neo4j: autoRecall=${cfg.autoRecall}`);
    if (cfg.autoRecall) {
      api.logger.debug?.("memory-neo4j: registering before_agent_start hook for auto-recall");
      api.on("before_agent_start", async (event, ctx) => {
        if (!event.prompt || event.prompt.length < 5) {
          return;
        }
        const agentId = ctx.agentId || "default";
        // Truncate prompt to avoid exceeding embedding model context length
        // ~6000 chars is safe for most embedding models (leaves headroom for 2k tokens)
        const MAX_QUERY_CHARS = 6000;
        const query =
          event.prompt.length > MAX_QUERY_CHARS
            ? event.prompt.slice(0, MAX_QUERY_CHARS)
            : event.prompt;
        try {
          const results = await hybridSearch(
            db,
            embeddings,
            query,
            3,
            agentId,
            extractionConfig.enabled,
          );
          if (results.length === 0) {
            return;
          }
          const memoryContext = results.map((r) => `- [${r.category}] ${r.text}`).join("\n");
          api.logger.info?.(`memory-neo4j: injecting ${results.length} memories into context`);
          api.logger.debug?.(
            `memory-neo4j: auto-recall memories: ${JSON.stringify(results.map((r) => ({ id: r.id, text: r.text.slice(0, 80), category: r.category, score: r.score })))}`,
          );
          return {
            prependContext: `<relevant-memories>\nThe following memories may be relevant to this conversation:\n${memoryContext}\n</relevant-memories>`,
          };
        } catch (err) {
          // Recall failure must never block the agent turn.
          api.logger.warn(`memory-neo4j: auto-recall failed: ${String(err)}`);
        }
      });
    }
    // Core memories: inject as virtual MEMORY.md at bootstrap time (scoped by agentId).
    // Only runs on new sessions and after compaction (not every turn).
    api.logger.debug?.(`memory-neo4j: coreMemory.enabled=${cfg.coreMemory.enabled}`);
    if (cfg.coreMemory.enabled) {
      api.logger.debug?.("memory-neo4j: registering agent_bootstrap hook for core memories");
      api.on("agent_bootstrap", async (event, ctx) => {
        const sessionKey = ctx.sessionKey;
        // Skip if this session was already bootstrapped (avoid re-loading every turn).
        // The after_compaction hook clears the flag so we re-inject after compaction.
        if (sessionKey && bootstrappedSessions.has(sessionKey)) {
          api.logger.debug?.(
            `memory-neo4j: skipping core memory injection for already-bootstrapped session=${sessionKey}`,
          );
          return;
        }
        // Log when we're about to inject core memories for a session that wasn't tracked
        // This helps diagnose cases where context might be lost after gateway restarts
        if (sessionKey) {
          api.logger.debug?.(
            `memory-neo4j: session=${sessionKey} not in bootstrappedSessions (size=${bootstrappedSessions.size}), will check for core memories`,
          );
        }
        try {
          const agentId = ctx.agentId || "default";
          const maxEntries = cfg.coreMemory.maxEntries;
          api.logger.debug?.(
            `memory-neo4j: loading core memories for agent=${agentId} session=${sessionKey ?? "unknown"}`,
          );
          // Core memories are always included (no importance filter) - if marked as core, it's important
          // Results are ordered by importance desc, so most important come first up to maxEntries
          const coreMemories = await db.listByCategory("core", maxEntries, 0, agentId);
          if (coreMemories.length === 0) {
            if (sessionKey) {
              bootstrappedSessions.add(sessionKey);
            }
            api.logger.debug?.(
              `memory-neo4j: no core memories found for agent=${agentId}, marking session as bootstrapped`,
            );
            return;
          }
          // Format core memories into a MEMORY.md-style document
          let content = "# Core Memory\n\n";
          content += "*Persistent context loaded from long-term memory*\n\n";
          for (const mem of coreMemories) {
            content += `- ${mem.text}\n`;
          }
          // Find and replace MEMORY.md in the files list, or add it
          const files = [...event.files];
          const memoryIndex = files.findIndex(
            (f) => f.name === "MEMORY.md" || f.name === "memory.md",
          );
          const virtualFile = {
            name: "MEMORY.md" as const,
            path: "memory://neo4j/core-memory",
            content,
            missing: false,
          };
          const action = memoryIndex >= 0 ? "replaced" : "added";
          if (memoryIndex >= 0) {
            files[memoryIndex] = virtualFile;
          } else {
            files.push(virtualFile);
          }
          if (sessionKey) {
            bootstrappedSessions.add(sessionKey);
          }
          // Log at info level when actually injecting, debug for skips
          api.logger.info?.(
            `memory-neo4j: ${action} MEMORY.md with ${coreMemories.length} core memories for agent=${agentId} session=${sessionKey ?? "unknown"}`,
          );
          return { files };
        } catch (err) {
          api.logger.warn(`memory-neo4j: core memory injection failed: ${String(err)}`);
        }
      });
    }
    // Auto-capture: LLM-based decision on what to store from conversations
    api.logger.debug?.(
      `memory-neo4j: autoCapture=${cfg.autoCapture}, extraction.enabled=${extractionConfig.enabled}`,
    );
    if (cfg.autoCapture) {
      api.logger.debug?.("memory-neo4j: registering agent_end hook for auto-capture");
      api.on("agent_end", async (event, ctx) => {
        api.logger.debug?.(
          `memory-neo4j: agent_end fired (success=${event.success}, messages=${event.messages?.length ?? 0})`,
        );
        if (!event.success || !event.messages || event.messages.length === 0) {
          api.logger.debug?.("memory-neo4j: skipping - no success or empty messages");
          return;
        }
        const agentId = ctx.agentId || "default";
        const sessionKey = ctx.sessionKey;
        try {
          if (extractionConfig.enabled) {
            // LLM-based auto-capture (Decision Q8)
            const userMessages = extractUserMessages(event.messages);
            if (userMessages.length === 0) {
              return;
            }
            const items = await evaluateAutoCapture(userMessages, extractionConfig);
            if (items.length === 0) {
              return;
            }
            let stored = 0;
            for (const item of items) {
              try {
                const vector = await embeddings.embed(item.text);
                // Check for duplicates
                const existing = await db.findSimilar(vector, 0.95, 1);
                if (existing.length > 0) {
                  continue;
                }
                const memoryId = randomUUID();
                await db.storeMemory({
                  id: memoryId,
                  text: item.text,
                  embedding: vector,
                  importance: item.importance,
                  category: item.category,
                  source: "auto-capture",
                  extractionStatus: "pending",
                  agentId,
                  sessionKey,
                });
                // Extraction deferred to sleep cycle (like human memory consolidation)
                stored++;
              } catch (err) {
                // One bad item must not abort the rest of the batch.
                api.logger.debug?.(`memory-neo4j: auto-capture item failed: ${String(err)}`);
              }
            }
            if (stored > 0) {
              api.logger.info(`memory-neo4j: auto-captured ${stored} memories (LLM-based)`);
            }
          } else {
            // Fallback: rule-based capture (no extraction API key)
            const userMessages = extractUserMessages(event.messages);
            if (userMessages.length === 0) {
              return;
            }
            const toCapture = userMessages.filter(
              (text) => text.length >= 10 && text.length <= 500 && shouldCaptureRuleBased(text),
            );
            if (toCapture.length === 0) {
              return;
            }
            let stored = 0;
            // Cap at 3 rule-based captures per conversation.
            for (const text of toCapture.slice(0, 3)) {
              const category = detectCategory(text);
              const vector = await embeddings.embed(text);
              const existing = await db.findSimilar(vector, 0.95, 1);
              if (existing.length > 0) {
                continue;
              }
              await db.storeMemory({
                id: randomUUID(),
                text,
                embedding: vector,
                importance: 0.7,
                category,
                source: "auto-capture",
                extractionStatus: "skipped",
                agentId,
                sessionKey,
              });
              stored++;
            }
            if (stored > 0) {
              api.logger.info(`memory-neo4j: auto-captured ${stored} memories (rule-based)`);
            }
          }
        } catch (err) {
          api.logger.warn(`memory-neo4j: auto-capture failed: ${String(err)}`);
        }
      });
    }
    // ========================================================================
    // Service
    // ========================================================================
    api.registerService({
      id: "memory-neo4j",
      start: async () => {
        try {
          await db.ensureInitialized();
          api.logger.info(
            `memory-neo4j: service started (uri: ${cfg.neo4j.uri}, model: ${cfg.embedding.model})`,
          );
        } catch (err) {
          api.logger.error(
            `memory-neo4j: failed to start — ${String(err)}. Memory tools will attempt lazy initialization.`,
          );
          // Don't throw — allow graceful degradation.
          // Tools will retry initialization on first use.
        }
      },
      stop: async () => {
        await db.close();
        api.logger.info("memory-neo4j: service stopped");
      },
    });
  },
};
// ============================================================================
// Rule-based capture filter (fallback when no extraction API key)
// ============================================================================
// Heuristic trigger patterns used when no extraction LLM is configured:
// memory verbs (en/cs), preferences, decisions, phone numbers, emails,
// "my X is" phrasing, first-person likes, and emphasis words.
const MEMORY_TRIGGERS = [
  /remember|zapamatuj|pamatuj/i,
  /prefer|radši|nechci|preferuji/i,
  /decided|rozhodli|budeme používat/i,
  /\+\d{10,}/,
  /[\w.-]+@[\w.-]+\.\w+/,
  /my\s+\w+\s+is|is\s+my/i,
  /i (like|prefer|hate|love|want|need)/i,
  /always|never|important/i,
];
/**
 * Rule-based filter deciding whether a user message looks memory-worthy.
 * Rejects injected markup, markdown-heavy text, and emoji spam, then
 * requires at least one trigger pattern to match.
 */
function shouldCaptureRuleBased(text: string): boolean {
  // Never re-capture our own injected memory context.
  if (text.includes("<relevant-memories>")) {
    return false;
  }
  // Skip XML/HTML-looking payloads.
  if (text.startsWith("<") && text.includes("</")) {
    return false;
  }
  // Skip markdown-formatted content (bold markers plus bullet lists).
  if (text.includes("**") && text.includes("\n-")) {
    return false;
  }
  // Skip emoji-heavy messages (likely casual chatter, not facts).
  const emojiMatches = text.match(/[\u{1F300}-\u{1F9FF}]/gu);
  if (emojiMatches !== null && emojiMatches.length > 3) {
    return false;
  }
  for (const trigger of MEMORY_TRIGGERS) {
    if (trigger.test(text)) {
      return true;
    }
  }
  return false;
}
/**
 * Best-effort category heuristic for rule-based auto-capture.
 * Patterns are checked in priority order (preference → decision → entity →
 * fact); the first match wins and anything unmatched is "other".
 * English and Czech keywords are both recognized.
 */
function detectCategory(text: string): MemoryCategory {
  const lower = text.toLowerCase();
  const rules: Array<[RegExp, MemoryCategory]> = [
    [/prefer|radši|like|love|hate|want/i, "preference"],
    [/decided|rozhodli|will use|budeme/i, "decision"],
    // Phone numbers, email-like fragments, or "is called" naming.
    [/\+\d{10,}|@[\w.-]+\.\w+|is called|jmenuje se/i, "entity"],
    [/is|are|has|have|je|má|jsou/i, "fact"],
  ];
  for (const [pattern, category] of rules) {
    if (pattern.test(lower)) {
      return category;
    }
  }
  return "other";
}
// ============================================================================
// Export
// ============================================================================
export default memoryNeo4jPlugin;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,99 @@
{
"id": "memory-neo4j",
"kind": "memory",
"uiHints": {
"embedding.provider": {
"label": "Embedding Provider",
"placeholder": "openai",
"help": "Provider for embeddings: 'openai' or 'ollama'"
},
"embedding.apiKey": {
"label": "API Key",
"sensitive": true,
"placeholder": "sk-proj-...",
"help": "API key for OpenAI embeddings (not needed for Ollama)"
},
"embedding.model": {
"label": "Embedding Model",
"placeholder": "text-embedding-3-small",
"help": "Embedding model to use (e.g., text-embedding-3-small for OpenAI, mxbai-embed-large for Ollama)"
},
"embedding.baseUrl": {
"label": "Base URL",
"placeholder": "http://localhost:11434",
"help": "Base URL for Ollama API (optional)"
},
"neo4j.uri": {
"label": "Neo4j URI",
"placeholder": "bolt://localhost:7687",
"help": "Bolt connection URI for your Neo4j instance"
},
"neo4j.user": {
"label": "Neo4j Username",
"placeholder": "neo4j"
},
"neo4j.password": {
"label": "Neo4j Password",
"sensitive": true
},
"autoCapture": {
"label": "Auto-Capture",
"help": "Automatically capture important information from conversations"
},
"autoRecall": {
"label": "Auto-Recall",
"help": "Automatically inject relevant memories into context"
}
},
"configSchema": {
"type": "object",
"additionalProperties": false,
"properties": {
"embedding": {
"type": "object",
"additionalProperties": false,
"properties": {
"provider": {
"type": "string",
"enum": ["openai", "ollama"]
},
"apiKey": {
"type": "string"
},
"model": {
"type": "string"
},
"baseUrl": {
"type": "string"
}
}
},
"neo4j": {
"type": "object",
"additionalProperties": false,
"properties": {
"uri": {
"type": "string"
},
"user": {
"type": "string"
},
"username": {
"type": "string"
},
"password": {
"type": "string"
}
},
"required": ["uri"]
},
"autoCapture": {
"type": "boolean"
},
"autoRecall": {
"type": "boolean"
}
},
"required": ["neo4j"]
}
}

View File

@@ -0,0 +1,19 @@
{
"name": "@openclaw/memory-neo4j",
"version": "2026.2.2",
"description": "OpenClaw Neo4j-backed long-term memory plugin with three-signal hybrid search, entity extraction, and knowledge graph",
"type": "module",
"dependencies": {
"@sinclair/typebox": "0.34.48",
"neo4j-driver": "^5.27.0",
"openai": "^6.17.0"
},
"devDependencies": {
"openclaw": "workspace:*"
},
"openclaw": {
"extensions": [
"./index.ts"
]
}
}

View File

@@ -0,0 +1,174 @@
/**
* Graph schema types, Cypher query templates, and constants for memory-neo4j.
*/
// ============================================================================
// Node Types
// ============================================================================
export type MemoryCategory = "preference" | "fact" | "decision" | "entity" | "other";
export type EntityType = "person" | "organization" | "location" | "event" | "concept";
export type ExtractionStatus = "pending" | "complete" | "failed" | "skipped";
export type MemorySource = "user" | "auto-capture" | "memory-watcher" | "import";
export type MemoryNode = {
id: string;
text: string;
embedding: number[];
importance: number;
category: MemoryCategory;
source: MemorySource;
createdAt: string;
updatedAt: string;
extractionStatus: ExtractionStatus;
agentId: string;
sessionKey?: string;
};
export type EntityNode = {
id: string;
name: string;
type: EntityType;
aliases: string[];
embedding?: number[];
description?: string;
firstSeen: string;
lastSeen: string;
mentionCount: number;
};
export type TagNode = {
id: string;
name: string;
category: string;
createdAt: string;
};
// ============================================================================
// Extraction Types
// ============================================================================
export type ExtractedEntity = {
name: string;
type: EntityType;
aliases?: string[];
description?: string;
};
export type ExtractedRelationship = {
source: string;
target: string;
type: string;
confidence: number;
};
export type ExtractedTag = {
name: string;
category: string;
};
export type ExtractionResult = {
entities: ExtractedEntity[];
relationships: ExtractedRelationship[];
tags: ExtractedTag[];
};
// ============================================================================
// Auto-Capture Types
// ============================================================================
export type CaptureItem = {
text: string;
category: MemoryCategory;
importance: number;
};
export type CaptureDecision = {
memories: CaptureItem[];
};
// ============================================================================
// Search Types
// ============================================================================
export type SearchSignalResult = {
id: string;
text: string;
category: string;
importance: number;
createdAt: string;
score: number;
};
export type HybridSearchResult = {
id: string;
text: string;
category: string;
importance: number;
createdAt: string;
score: number;
};
// ============================================================================
// Input Types
// ============================================================================
// Payload for persisting a new memory node.
export type StoreMemoryInput = {
  id: string;
  text: string;
  embedding: number[];
  importance: number;
  category: MemoryCategory;
  source: MemorySource;
  extractionStatus: ExtractionStatus;
  agentId: string;
  sessionKey?: string; // present only when the memory is tied to a session
};
// Payload for creating-or-updating an entity node (merge/upsert semantics
// implied by the name — confirm against the client implementation).
export type MergeEntityInput = {
  id: string;
  name: string;
  type: EntityType;
  aliases?: string[];
  description?: string;
  embedding?: number[];
};
// ============================================================================
// Constants
// ============================================================================
export const MEMORY_CATEGORIES = ["preference", "fact", "decision", "entity", "other"] as const;
export const ENTITY_TYPES = ["person", "organization", "location", "event", "concept"] as const;
export const ALLOWED_RELATIONSHIP_TYPES = new Set([
"WORKS_AT",
"LIVES_AT",
"KNOWS",
"MARRIED_TO",
"PREFERS",
"DECIDED",
"RELATED_TO",
]);
// ============================================================================
// Lucene Helpers
// ============================================================================
const LUCENE_SPECIAL_CHARS = /[+\-&|!(){}[\]^"~*?:\\/]/g;
/**
* Escape special characters for Lucene fulltext search queries.
*/
export function escapeLucene(query: string): string {
return query.replace(LUCENE_SPECIAL_CHARS, "\\$&");
}
/**
* Validate that a relationship type is in the allowed set.
* Prevents Cypher injection via dynamic relationship type.
*/
export function validateRelationshipType(type: string): boolean {
return ALLOWED_RELATIONSHIP_TYPES.has(type);
}

View File

@@ -0,0 +1,257 @@
/**
* Three-signal hybrid search with query-adaptive RRF fusion.
*
* Combines:
* Signal 1: Vector similarity (HNSW cosine)
* Signal 2: BM25 full-text keyword matching
* Signal 3: Graph traversal (entity → MENTIONS ← memory)
*
* Fused using confidence-weighted Reciprocal Rank Fusion (RRF)
* with query-adaptive signal weights.
*
* Adapted from ~/Downloads/ontology/app/services/rrf.py
*/
import type { Embeddings } from "./embeddings.js";
import type { Neo4jMemoryClient } from "./neo4j-client.js";
import type { HybridSearchResult, SearchSignalResult } from "./schema.js";
// ============================================================================
// Query Classification
// ============================================================================
export type QueryType = "short" | "entity" | "long" | "default";

// Words that are often capitalized mid-question but are not proper nouns.
const NON_ENTITY_CAPITALIZED =
  /^(I|A|An|The|Is|Are|Was|Were|What|Who|Where|When|How|Why|Do|Does|Did)$/;

// Question openers that usually target a specific entity ("who is X", ...).
const ENTITY_QUESTION_PATTERN = /^(who|where|what)\s+(is|does|did|was|were)\s/i;

/**
 * Classify a query to determine adaptive signal weights.
 *
 * - short (1-2 words): BM25 excels at exact keyword matching
 * - entity (proper nouns detected): graph traversal finds connected memories
 * - long (5+ words): vector search captures semantic intent better
 * - default: balanced weights
 */
export function classifyQuery(query: string): QueryType {
  const tokens = query.trim().split(/\s+/);

  // 1-2 words → keyword-style lookup.
  if (tokens.length <= 2) {
    return "short";
  }
  // 5+ words → semantic intent dominates.
  if (tokens.length >= 5) {
    return "long";
  }

  // Proper-noun heuristic: any capitalized word after the first one
  // (excluding common sentence words) suggests an entity-focused query.
  const hasProperNoun = tokens
    .slice(1) // skip first word (often capitalized anyway)
    .some((token) => /^[A-Z]/.test(token) && !NON_ENTITY_CAPITALIZED.test(token));
  if (hasProperNoun) {
    return "entity";
  }

  // Questions like "who is ..." / "where does ..." also target entities.
  if (ENTITY_QUESTION_PATTERN.test(query)) {
    return "entity";
  }

  return "default";
}
/**
 * Get adaptive signal weights based on query type.
 * Returns [vectorWeight, bm25Weight, graphWeight].
 *
 * Decision Q7: Query-adaptive RRF weights
 * - Short → boost BM25 (keyword matching)
 * - Entity → boost graph (relationship traversal)
 * - Long → boost vector (semantic similarity)
 */
export function getAdaptiveWeights(
  queryType: QueryType,
  graphEnabled: boolean,
): [number, number, number] {
  // [vector, bm25, graph] multipliers per query class. The graph weight is
  // zeroed out entirely when graph search is unavailable.
  const table: Record<QueryType, [number, number, number]> = {
    short: [0.8, 1.2, 1.0],
    entity: [0.8, 1.0, 1.3],
    long: [1.2, 0.7, 0.8],
    default: [1.0, 1.0, 1.0],
  };
  const [vector, bm25, graph] = table[queryType] ?? table.default;
  return [vector, bm25, graphEnabled ? graph : 0.0];
}
// ============================================================================
// Confidence-Weighted RRF Fusion
// ============================================================================

// Placement of a candidate inside one signal's result list.
type SignalEntry = {
  rank: number; // 1-indexed
  score: number; // 0-1 normalized
};

// A candidate memory carrying its fused RRF score.
type FusedCandidate = {
  id: string;
  text: string;
  category: string;
  importance: number;
  createdAt: string;
  rrfScore: number;
};

/**
 * Fuse multiple search signals using confidence-weighted RRF.
 *
 * Formula: RRF_conf(d) = Σ w_i × score_i(d) / (k + rank_i(d))
 *
 * Unlike standard RRF which only uses ranks, this variant preserves
 * score magnitude: rank-1 with score 0.99 contributes more than
 * rank-1 with score 0.55.
 *
 * Reference: Cormack et al. (2009), extended with confidence weighting.
 */
function fuseWithConfidenceRRF(
  signals: SearchSignalResult[][],
  k: number,
  weights: number[],
): FusedCandidate[] {
  // Per-signal lookup of id → { rank, score }. A duplicate id within one
  // signal keeps its first (better-ranked) occurrence.
  const rankings = signals.map((signal) => {
    const byId = new Map<string, SignalEntry>();
    signal.forEach((hit, index) => {
      if (!byId.has(hit.id)) {
        byId.set(hit.id, { rank: index + 1, score: hit.score });
      }
    });
    return byId;
  });

  // The first appearance of each candidate across all signals supplies its
  // display metadata.
  const metadata = new Map<string, Omit<FusedCandidate, "rrfScore">>();
  for (const signal of signals) {
    for (const hit of signal) {
      if (!metadata.has(hit.id)) {
        metadata.set(hit.id, {
          id: hit.id,
          text: hit.text,
          category: hit.category,
          importance: hit.importance,
          createdAt: hit.createdAt,
        });
      }
    }
  }

  // Accumulate the weighted contribution from every signal the candidate
  // appears in, then order best-first.
  const fused: FusedCandidate[] = [];
  for (const [id, meta] of metadata) {
    let total = 0;
    rankings.forEach((byId, signalIndex) => {
      const placement = byId.get(id);
      if (placement) {
        // Confidence-weighted: the raw score scales the rank contribution.
        total += weights[signalIndex] * placement.score * (1 / (k + placement.rank));
      }
    });
    fused.push({ ...meta, rrfScore: total });
  }
  fused.sort((left, right) => right.rrfScore - left.rrfScore);
  return fused;
}
// ============================================================================
// Hybrid Search Orchestrator
// ============================================================================
/**
 * Perform a three-signal hybrid search with query-adaptive RRF fusion.
 *
 * 1. Embed the query
 * 2. Classify query for adaptive weights
 * 3. Run three signals in parallel
 * 4. Fuse with confidence-weighted RRF
 * 5. Return top results with scores normalized to 0-1
 *
 * Graceful degradation: if any signal returns nothing, RRF works with the
 * remaining signals. If graph search is not enabled (no extraction API key),
 * this is a 2-signal fusion.
 */
export async function hybridSearch(
  db: Neo4jMemoryClient,
  embeddings: Embeddings,
  query: string,
  limit: number = 5,
  agentId: string = "default",
  graphEnabled: boolean = false,
  options: {
    rrfK?: number;
    candidateMultiplier?: number;
    graphFiringThreshold?: number;
  } = {},
): Promise<HybridSearchResult[]> {
  const rrfK = options.rrfK ?? 60;
  const candidateMultiplier = options.candidateMultiplier ?? 4;
  const graphFiringThreshold = options.graphFiringThreshold ?? 0.3;

  // Over-fetch candidates per signal (clamped to [1, 200]) so fusion has
  // enough overlap to work with.
  const perSignalLimit = Math.floor(Math.min(200, Math.max(1, limit * candidateMultiplier)));

  // Embed once, then derive weights from the query's shape.
  const queryEmbedding = await embeddings.embed(query);
  const weights = getAdaptiveWeights(classifyQuery(query), graphEnabled);

  // Fan out all signals concurrently; graph search only fires when enabled.
  const graphPromise: Promise<SearchSignalResult[]> = graphEnabled
    ? db.graphSearch(query, perSignalLimit, graphFiringThreshold, agentId)
    : Promise.resolve([]);
  const [vectorHits, bm25Hits, graphHits] = await Promise.all([
    db.vectorSearch(queryEmbedding, perSignalLimit, 0.1, agentId),
    db.bm25Search(query, perSignalLimit, agentId),
    graphPromise,
  ]);

  const fused = fuseWithConfidenceRRF([vectorHits, bm25Hits, graphHits], rrfK, weights);

  // Rescale so the best candidate reads as 1.0 (display-friendly 0-1 range).
  const topScore = fused.length > 0 ? fused[0].rrfScore : 1;
  const scale = topScore > 0 ? 1 / topScore : 1;
  return fused.slice(0, limit).map((candidate) => ({
    id: candidate.id,
    text: candidate.text,
    category: candidate.category,
    importance: candidate.importance,
    createdAt: candidate.createdAt,
    score: Math.min(1, candidate.rrfScore * scale),
  }));
}

View File

@@ -0,0 +1,19 @@
{
"compilerOptions": {
"target": "ES2023",
"lib": ["ES2023"],
"module": "ESNext",
"moduleResolution": "bundler",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"outDir": "./dist",
"rootDir": "."
},
"include": ["*.ts"],
"exclude": ["node_modules", "dist", "*.test.ts"]
}

View File

@@ -209,7 +209,7 @@
"engines": {
"node": ">=22.12.0"
},
"packageManager": "pnpm@10.23.0",
"packageManager": "pnpm@10.28.2",
"pnpm": {
"minimumReleaseAge": 2880,
"overrides": {

54
pnpm-lock.yaml generated
View File

@@ -419,6 +419,22 @@ importers:
specifier: workspace:*
version: link:../..
extensions/memory-neo4j:
dependencies:
'@sinclair/typebox':
specifier: 0.34.47
version: 0.34.47
neo4j-driver:
specifier: ^5.27.0
version: 5.28.3
openai:
specifier: ^6.17.0
version: 6.17.0(ws@8.19.0)(zod@4.3.6)
devDependencies:
openclaw:
specifier: workspace:*
version: link:../..
extensions/minimax-portal-auth:
devDependencies:
openclaw:
@@ -3343,6 +3359,9 @@ packages:
buffer-from@1.1.2:
resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}
buffer@6.0.3:
resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==}
bun-types@1.3.6:
resolution: {integrity: sha512-OlFwHcnNV99r//9v5IIOgQ9Uk37gZqrNMCcqEaExdkVq3Avwqok1bJFmvGMCkCE0FqzdY8VMOZpfpR3lwI+CsQ==}
@@ -4563,6 +4582,15 @@ packages:
resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==}
engines: {node: '>= 0.6'}
neo4j-driver-bolt-connection@5.28.3:
resolution: {integrity: sha512-wqHBYcU0FVRDmdsoZ+Fk0S/InYmu9/4BT6fPYh45Jimg/J7vQBUcdkiHGU7nop7HRb1ZgJmL305mJb6g5Bv35Q==}
neo4j-driver-core@5.28.3:
resolution: {integrity: sha512-Jk+hAmjFmO5YzVH/U7FyKXigot9zmIfLz6SZQy0xfr4zfTE/S8fOYFOGqKQTHBE86HHOWH2RbTslbxIb+XtU2g==}
neo4j-driver@5.28.3:
resolution: {integrity: sha512-k7c0wEh3HoONv1v5AyLp9/BDAbYHJhz2TZvzWstSEU3g3suQcXmKEaYBfrK2UMzxcy3bCT0DrnfRbzsOW5G/Ag==}
netmask@2.0.2:
resolution: {integrity: sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==}
engines: {node: '>= 0.4.0'}
@@ -5112,6 +5140,9 @@ packages:
resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==}
engines: {node: '>= 18'}
rxjs@7.8.2:
resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==}
safe-buffer@5.1.2:
resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==}
@@ -8899,6 +8930,11 @@ snapshots:
buffer-from@1.1.2: {}
buffer@6.0.3:
dependencies:
base64-js: 1.5.1
ieee754: 1.2.1
bun-types@1.3.6:
dependencies:
'@types/node': 25.2.3
@@ -10166,6 +10202,20 @@ snapshots:
negotiator@1.0.0: {}
neo4j-driver-bolt-connection@5.28.3:
dependencies:
buffer: 6.0.3
neo4j-driver-core: 5.28.3
string_decoder: 1.3.0
neo4j-driver-core@5.28.3: {}
neo4j-driver@5.28.3:
dependencies:
neo4j-driver-bolt-connection: 5.28.3
neo4j-driver-core: 5.28.3
rxjs: 7.8.2
netmask@2.0.2: {}
node-addon-api@8.5.0: {}
@@ -10888,6 +10938,10 @@ snapshots:
transitivePeerDependencies:
- supports-color
rxjs@7.8.2:
dependencies:
tslib: 2.8.1
safe-buffer@5.1.2: {}
safe-buffer@5.2.1: {}

127
scripts/update-and-restart.sh Executable file
View File

@@ -0,0 +1,127 @@
#!/usr/bin/env bash
#
# update-and-restart.sh — Pull, build, link, and restart OpenClaw gateway
# Verifies the built artifacts (and repo state) match the commit that was built.
#
set -euo pipefail

REPO_DIR="$HOME/openclaw"

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

log() { echo -e "${CYAN}[$(date '+%H:%M:%S')]${NC} $1"; }
ok() { echo -e "${GREEN}$1${NC}"; }
warn() { echo -e "${YELLOW}⚠️ $1${NC}"; }
fail() { echo -e "${RED}$1${NC}"; exit 1; }

cd "$REPO_DIR" || fail "Cannot cd to $REPO_DIR"

# --- Check for uncommitted changes ---
if ! git diff --quiet || ! git diff --cached --quiet; then
  warn "You have uncommitted changes:"
  git status --short
  echo ""
  read -rp "Continue anyway? (y/N) " confirm
  [[ "$confirm" =~ ^[Yy]$ ]] || { log "Aborted."; exit 0; }
fi

# --- Record pre-pull state ---
OLD_SHA=$(git rev-parse HEAD)
OLD_SHORT=$(git rev-parse --short HEAD)
log "Current commit: ${OLD_SHORT}"

# --- Git pull (rebase to keep local commits on top) ---
log "Pulling latest changes (rebase)..."
if git pull --rebase 2>&1; then
  ok "Git pull complete"
else
  warn "Rebase failed — aborting rebase and stopping."
  git rebase --abort 2>/dev/null || true
  fail "Git pull --rebase failed (conflicts?). Resolve manually."
fi

NEW_SHA=$(git rev-parse HEAD)
NEW_SHORT=$(git rev-parse --short HEAD)
if [ "$OLD_SHA" = "$NEW_SHA" ]; then
  log "Already up to date (${NEW_SHORT})"
else
  log "Updated: ${OLD_SHORT} -> ${NEW_SHORT}"
  echo ""
  git --no-pager log --oneline "${OLD_SHA}..${NEW_SHA}" | head -20
  echo ""
fi

# --- pnpm install ---
log "Installing dependencies..."
if pnpm install --frozen-lockfile 2>&1; then
  ok "pnpm install complete"
else
  warn "Frozen lockfile failed, trying regular install..."
  pnpm install 2>&1 || fail "pnpm install failed"
  ok "pnpm install complete"
fi

# --- pnpm format (check only) ---
log "Checking code formatting..."
pnpm format 2>&1 || fail "Format check failed — run 'pnpm exec oxfmt --write <file>' to fix"
ok "Format check passed"

# --- pnpm build ---
log "Building TypeScript..."
pnpm build 2>&1 || fail "pnpm build failed"
ok "Build complete"

# --- pnpm lint ---
log "Running linter..."
pnpm lint 2>&1 || fail "Lint check failed — run 'pnpm exec oxlint <file>' to fix"
ok "Lint check passed"

# --- pnpm link ---
log "Linking globally..."
pnpm link --global 2>&1 || fail "pnpm link --global failed"
ok "Linked globally"

# --- Capture the commit SHA that was just built ---
BUILT_SHA=$(git rev-parse HEAD)
BUILT_SHORT=$(git rev-parse --short HEAD)
log "Built commit: ${BUILT_SHORT} (${BUILT_SHA})"

# --- Restart gateway ---
log "Restarting gateway..."
openclaw gateway restart 2>&1 || fail "Gateway restart failed"

# --- Wait for gateway to come back ---
log "Waiting for gateway to stabilize..."
sleep 3

# --- Verify the running gateway matches ---
# Surface what the gateway is actually executing (best-effort; previously this
# value was captured but never used).
RUNNING_ENTRY=$(openclaw gateway status 2>&1 | grep -oP '(?<=Command: ).*' || true)
if [ -n "$RUNNING_ENTRY" ]; then
  log "Gateway command: ${RUNNING_ENTRY}"
else
  warn "Could not determine gateway command from 'openclaw gateway status'"
fi

# Strongest signal: the SHA embedded in the built dist must match the commit
# we just built. (Previously DIST_SHA was computed but never compared.)
DIST_SHA=""
if [ -f "$REPO_DIR/dist/version.js" ]; then
  DIST_SHA=$(grep -oP '[a-f0-9]{40}' "$REPO_DIR/dist/version.js" 2>/dev/null | head -1 || true)
fi
if [ -n "$DIST_SHA" ]; then
  if [ "$DIST_SHA" = "$BUILT_SHA" ]; then
    ok "Dist verified: ${DIST_SHA:0:7} matches built commit"
  else
    fail "SHA MISMATCH! Built ${BUILT_SHORT} but dist contains ${DIST_SHA:0:7}"
  fi
else
  warn "No commit SHA found in dist/version.js — skipping dist verification"
fi

# Sanity check: the repo must not have moved since the build. (The old check
# compared two post-build reads of HEAD, which could never differ in the
# normal flow and so verified nothing.)
POST_SHA=$(git -C "$REPO_DIR" rev-parse HEAD)
if [ "$BUILT_SHA" = "$POST_SHA" ]; then
  ok "Commit verified: built=${BUILT_SHORT}, repo=${POST_SHA:0:7}"
else
  fail "SHA MISMATCH! Built ${BUILT_SHORT} but repo is now ${POST_SHA:0:7}"
fi

# --- Summary ---
echo ""
echo -e "${GREEN}════════════════════════════════════════${NC}"
echo -e "${GREEN} OpenClaw updated and restarted!${NC}"
echo -e "${GREEN} Commit: ${BUILT_SHORT}${NC}"
if [ "$OLD_SHA" != "$NEW_SHA" ]; then
  COMMIT_COUNT=$(git rev-list --count "${OLD_SHA}..${NEW_SHA}")
  echo -e "${GREEN} Changes: ${COMMIT_COUNT} new commit(s)${NC}"
fi
echo -e "${GREEN}════════════════════════════════════════${NC}"

View File

@@ -2,6 +2,7 @@ import type { OpenClawConfig } from "../config/config.js";
import type { AgentBootstrapHookContext } from "../hooks/internal-hooks.js";
import type { WorkspaceBootstrapFile } from "./workspace.js";
import { createInternalHookEvent, triggerInternalHook } from "../hooks/internal-hooks.js";
import { getGlobalHookRunner } from "../plugins/hook-runner-global.js";
import { resolveAgentIdFromSessionKey } from "../routing/session-key.js";
export async function applyBootstrapHookOverrides(params: {
@@ -27,5 +28,30 @@ export async function applyBootstrapHookOverrides(params: {
const event = createInternalHookEvent("agent", "bootstrap", sessionKey, context);
await triggerInternalHook(event);
const updated = (event.context as AgentBootstrapHookContext).bootstrapFiles;
return Array.isArray(updated) ? updated : params.files;
const internalResult = Array.isArray(updated) ? updated : params.files;
// After internal hooks, run plugin hooks
const hookRunner = getGlobalHookRunner();
if (hookRunner?.hasHooks("agent_bootstrap")) {
const result = await hookRunner.runAgentBootstrap(
{
files: internalResult.map((f) => ({
name: f.name,
path: f.path,
content: f.content,
missing: f.missing,
})),
},
{
agentId,
sessionKey: params.sessionKey,
workspaceDir: params.workspaceDir,
},
);
if (result?.files) {
return result.files as WorkspaceBootstrapFile[];
}
}
return internalResult;
}

View File

@@ -1,6 +1,7 @@
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import type { ExtensionContext } from "@mariozechner/pi-coding-agent";
import { estimateTokens, generateSummary } from "@mariozechner/pi-coding-agent";
import { completeSimple } from "@mariozechner/pi-ai";
import { convertToLlm, estimateTokens, serializeConversation } from "@mariozechner/pi-coding-agent";
import { DEFAULT_CONTEXT_TOKENS } from "./defaults.js";
import { repairToolUseResultPairing, stripToolResultDetails } from "./session-transcript-repair.js";
@@ -13,6 +14,163 @@ const MERGE_SUMMARIES_INSTRUCTIONS =
"Merge these partial summaries into a single cohesive summary. Preserve decisions," +
" TODOs, open questions, and any constraints.";
// ---------------------------------------------------------------------------
// Enhanced summarization prompts with "Immediate Context" section
// ---------------------------------------------------------------------------
// These replace the upstream pi-coding-agent prompts to add recency awareness.
// The key addition is "## Immediate Context" which captures what was being
// actively discussed/worked on in the most recent messages, solving the problem
// of losing the "last thing we were doing" after compaction.
// System prompt: pins the model to emitting only the structured summary and
// forbids it from continuing the conversation it is reading.
const ENHANCED_SUMMARIZATION_SYSTEM_PROMPT =
  "You are a context summarization assistant. Your task is to read a conversation " +
  "between a user and an AI assistant, then produce a structured summary following " +
  "the exact format specified.\n\n" +
  "Do NOT continue the conversation. Do NOT respond to any questions in the " +
  "conversation. ONLY output the structured summary.";
// Initial summarization prompt, used when no previous summary exists. The
// format leads with "## Immediate Context" so recency is captured first.
const ENHANCED_SUMMARIZATION_PROMPT =
  "The messages above are a conversation to summarize. Create a structured context " +
  "checkpoint summary that another LLM will use to continue the work.\n\n" +
  "Use this EXACT format:\n\n" +
  "## Immediate Context\n" +
  "[What was the user MOST RECENTLY asking about or working on? Describe the active " +
  "conversation topic from the last few exchanges in detail. Include any pending " +
  "questions, partial results, or the exact state of the task right before this " +
  "summary. This section should read like a handoff note: 'You were just working " +
  "on X, the user asked Y, and you were in the middle of Z.']\n\n" +
  "## Goal\n" +
  "[What is the user trying to accomplish? Can be multiple items if the session " +
  "covers different tasks.]\n\n" +
  "## Constraints & Preferences\n" +
  "- [Any constraints, preferences, or requirements mentioned by user]\n" +
  '- [Or "(none)" if none were mentioned]\n\n' +
  "## Progress\n" +
  "### Done\n" +
  "- [x] [Completed tasks/changes]\n\n" +
  "### In Progress\n" +
  "- [ ] [Current work]\n\n" +
  "### Blocked\n" +
  "- [Issues preventing progress, if any]\n\n" +
  "## Key Decisions\n" +
  "- **[Decision]**: [Brief rationale]\n\n" +
  "## Next Steps\n" +
  "1. [Ordered list of what should happen next]\n\n" +
  "## Critical Context\n" +
  "- [Any data, examples, or references needed to continue]\n" +
  '- [Or "(none)" if not applicable]\n\n' +
  "Keep each section concise. Preserve exact file paths, function names, and error messages.";
// Incremental prompt, used when a previous summary is supplied in
// <previous-summary> tags: the Immediate Context section is always replaced
// with the newest topic, while other sections are merged/updated.
const ENHANCED_UPDATE_SUMMARIZATION_PROMPT =
  "The messages above are NEW conversation messages to incorporate into the existing " +
  "summary provided in <previous-summary> tags.\n\n" +
  "Update the existing structured summary with new information. RULES:\n" +
  "- REPLACE the Immediate Context section entirely with what the NEWEST messages " +
  "are about — this must always reflect the most recent conversation topic\n" +
  "- PRESERVE all existing information from the previous summary in other sections\n" +
  "- ADD new progress, decisions, and context from the new messages\n" +
  '- UPDATE the Progress section: move items from "In Progress" to "Done" when completed\n' +
  '- UPDATE "Next Steps" based on what was accomplished\n' +
  "- PRESERVE exact file paths, function names, and error messages\n" +
  "- If something is no longer relevant, you may remove it\n\n" +
  "Use this EXACT format:\n\n" +
  "## Immediate Context\n" +
  "[What is the conversation CURRENTLY about based on these newest messages? " +
  "Describe the active topic, any pending questions, and the exact state of work. " +
  "This REPLACES any previous immediate context — always reflect the latest exchanges.]\n\n" +
  "## Goal\n" +
  "[Preserve existing goals, add new ones if the task expanded]\n\n" +
  "## Constraints & Preferences\n" +
  "- [Preserve existing, add new ones discovered]\n\n" +
  "## Progress\n" +
  "### Done\n" +
  "- [x] [Include previously done items AND newly completed items]\n\n" +
  "### In Progress\n" +
  "- [ ] [Current work - update based on progress]\n\n" +
  "### Blocked\n" +
  "- [Current blockers - remove if resolved]\n\n" +
  "## Key Decisions\n" +
  "- **[Decision]**: [Brief rationale] (preserve all previous, add new)\n\n" +
  "## Next Steps\n" +
  "1. [Update based on current state]\n\n" +
  "## Critical Context\n" +
  "- [Preserve important context, add new if needed]\n\n" +
  "Keep each section concise. Preserve exact file paths, function names, and error messages.";
/**
 * Enhanced version of generateSummary that includes an "Immediate Context"
 * section in the compaction summary, so the most recent conversation topic is
 * prominently captured — solving the "can't remember what we were just doing"
 * problem after compaction.
 *
 * @param currentMessages Messages to summarize (or the new delta when updating).
 * @param model Model descriptor used for the summarization call.
 * @param reserveTokens Token budget reserved for the summary; 80% of it is
 *   handed to the model as maxTokens.
 * @param apiKey API key forwarded to completeSimple.
 * @param signal Abort signal for cancellation.
 * @param customInstructions Optional extra focus appended to the prompt.
 * @param previousSummary When present, switches to the incremental "update"
 *   prompt and supplies the old summary in <previous-summary> tags.
 * @returns The structured summary text.
 * @throws Error when the model call reports an error stop reason.
 */
async function generateSummary(
  currentMessages: AgentMessage[],
  model: NonNullable<ExtensionContext["model"]>,
  reserveTokens: number,
  apiKey: string,
  signal: AbortSignal,
  customInstructions?: string,
  previousSummary?: string,
): Promise<string> {
  // Leave 20% of the reserve as headroom for request overhead.
  const maxTokens = Math.floor(0.8 * reserveTokens);

  // Incremental runs use the update prompt; first runs use the initial one.
  const selectedPrompt = previousSummary
    ? ENHANCED_UPDATE_SUMMARIZATION_PROMPT
    : ENHANCED_SUMMARIZATION_PROMPT;
  const basePrompt = customInstructions
    ? `${selectedPrompt}\n\nAdditional focus: ${customInstructions}`
    : selectedPrompt;

  // Serialize the conversation to plain text so the model summarizes it
  // instead of trying to continue it. The assertion mirrors upstream usage:
  // convertToLlm accepts AgentMessage[] at runtime.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const llmMessages = convertToLlm(currentMessages as any);
  const conversationText = serializeConversation(llmMessages);

  // Assemble: <conversation> [+ <previous-summary>] + instructions.
  const promptParts = [`<conversation>\n${conversationText}\n</conversation>`];
  if (previousSummary) {
    promptParts.push(`<previous-summary>\n${previousSummary}\n</previous-summary>`);
  }
  promptParts.push(basePrompt);
  const promptText = promptParts.join("\n\n");

  // Single user message carrying the whole summarization request.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const summarizationMessages: any[] = [
    {
      role: "user",
      content: [{ type: "text", text: promptText }],
      timestamp: Date.now(),
    },
  ];

  const response = await completeSimple(
    model,
    {
      systemPrompt: ENHANCED_SUMMARIZATION_SYSTEM_PROMPT,
      messages: summarizationMessages,
    },
    { maxTokens, signal, apiKey, reasoning: "high" },
  );

  if (response.stopReason === "error") {
    const detail = (response as { errorMessage?: string }).errorMessage || "Unknown error";
    throw new Error(`Summarization failed: ${detail}`);
  }

  // Concatenate all text parts of the model's response.
  const chunks: string[] = [];
  for (const part of response.content as Array<{ type: string; text?: string }>) {
    if (part.type === "text" && part.text) {
      chunks.push(part.text);
    }
  }
  return chunks.join("\n");
}
export function estimateMessagesTokens(messages: AgentMessage[]): number {
// SECURITY: toolResult.details can contain untrusted/verbose payloads; never include in LLM-facing compaction.
const safe = stripToolResultDetails(messages);

View File

@@ -107,5 +107,39 @@ export function lookupContextTokens(modelId?: string): number | undefined {
}
// Best-effort: kick off loading, but don't block.
void loadPromise;
return MODEL_CACHE.get(modelId);
// Try exact match first (only if it contains a slash, i.e., already has provider prefix)
if (modelId.includes("/")) {
const exact = MODEL_CACHE.get(modelId);
if (exact !== undefined) {
return exact;
}
}
// For bare model names (no slash), try common provider prefixes first
// to prefer our custom config over built-in defaults.
// Priority order: prefer anthropic, then openai, then google
const prefixes = ["anthropic", "openai", "google"];
for (const prefix of prefixes) {
const prefixedKey = `${prefix}/${modelId}`;
const prefixed = MODEL_CACHE.get(prefixedKey);
if (prefixed !== undefined) {
return prefixed;
}
}
// Fallback to exact match for bare model names (built-in defaults)
const exact = MODEL_CACHE.get(modelId);
if (exact !== undefined) {
return exact;
}
// Final fallback: any matching suffix
for (const [key, value] of MODEL_CACHE) {
if (key.endsWith(`/${modelId}`)) {
return value;
}
}
return undefined;
}

View File

@@ -297,6 +297,13 @@ export function resolveMemorySearchConfig(
cfg: OpenClawConfig,
agentId: string,
): ResolvedMemorySearchConfig | null {
// Only one memory system can be active at a time.
// When a memory plugin owns the slot, core memory-search is unconditionally disabled.
const memoryPluginSlot = cfg.plugins?.slots?.memory;
if (memoryPluginSlot && memoryPluginSlot !== "none") {
return null;
}
const defaults = cfg.agents?.defaults?.memorySearch;
const overrides = resolveAgentConfig(cfg, agentId)?.memorySearch;
const resolved = mergeConfig(defaults, overrides, agentId);

View File

@@ -79,6 +79,17 @@ import { splitSdkTools } from "./tool-split.js";
import { describeUnknownError, mapThinkingLevel } from "./utils.js";
import { flushPendingToolResultsAfterIdle } from "./wait-for-idle-before-flush.js";
// Default guidance for compaction summarization: steers the summarizer toward
// resumable task state (active work, decisions, exact identifiers) and away
// from chatter. Joined into a single newline-separated instruction block.
export const DEFAULT_COMPACTION_INSTRUCTIONS = [
  "When summarizing this conversation, prioritize the following:",
  "1. Any active or in-progress tasks: include task name, current step, what has been done, what remains, and any pending user decisions.",
  "2. Key decisions made and their rationale.",
  "3. Exact values that would be needed to resume work: names, URLs, file paths, configuration values, row numbers, IDs.",
  "4. What the user was last working on and their most recent request.",
  "5. Tool state: any browser sessions, file operations, or API calls in progress.",
  "6. If TASKS.md was updated during this conversation, note which tasks changed and their current status.",
  "De-prioritize: casual conversation, greetings, completed tasks with no follow-up needed, resolved errors.",
].join("\n");
export type CompactEmbeddedPiSessionParams = {
sessionId: string;
runId?: string;
@@ -585,6 +596,48 @@ export async function compactEmbeddedPiSessionDirect(
if (limited.length > 0) {
session.agent.replaceMessages(limited);
}
// Pre-check: detect "already compacted but context is high" scenario
// The SDK rejects compaction if the last entry is a compaction, but this is
// too aggressive when context has grown back to threshold levels.
const branchEntries = sessionManager.getBranch();
const lastEntry = branchEntries.length > 0 ? branchEntries[branchEntries.length - 1] : null;
const isLastEntryCompaction = lastEntry?.type === "compaction";
if (isLastEntryCompaction) {
// Check if there's actually new content since the compaction
const compactionIndex = branchEntries.findIndex((e) => e.id === lastEntry.id);
const hasNewContent = branchEntries
.slice(compactionIndex + 1)
.some((e) => e.type === "message" || e.type === "custom_message");
if (!hasNewContent) {
// No new content since last compaction - estimate current context
let currentTokens = 0;
for (const message of session.messages) {
currentTokens += estimateTokens(message);
}
const contextWindow = model.contextWindow ?? 200000;
const contextPercent = (currentTokens / contextWindow) * 100;
// If context is still high (>70%) but no new content, provide clear error
if (contextPercent > 70) {
return {
ok: false,
compacted: false,
reason: `Already compacted • Context ${Math.round(currentTokens / 1000)}k/${Math.round(contextWindow / 1000)}k (${Math.round(contextPercent)}%) — the compaction summary itself is large. Consider starting a new session with /new`,
};
}
// Context is fine, just skip compaction gracefully
return {
ok: true,
compacted: false,
reason: "Already compacted",
};
}
// Has new content - fall through to let SDK handle it (it should work now)
}
// Run before_compaction hooks (fire-and-forget).
// The session JSONL already contains all messages on disk, so plugins
// can read sessionFile asynchronously and process in parallel with

View File

@@ -129,6 +129,54 @@ describe("buildWorkspaceSkillCommandSpecs", () => {
const cmd = commands.find((entry) => entry.skillName === "tool-dispatch");
expect(cmd?.dispatch).toEqual({ kind: "tool", toolName: "sessions_send", argMode: "raw" });
});
  // Verifies that per-skill settings from config (skills.entries.<name>)
  // propagate onto the generated command specs: `thinking` and `model` are
  // copied when configured and stay undefined when not.
  it("includes thinking and model from skill config", async () => {
    const workspaceDir = await makeWorkspace();
    // Three skills covering the config matrix: both fields, one field, none.
    await writeSkill({
      dir: path.join(workspaceDir, "skills", "browser"),
      name: "browser",
      description: "Browser automation",
    });
    await writeSkill({
      dir: path.join(workspaceDir, "skills", "replicate-image"),
      name: "replicate-image",
      description: "Image generation",
    });
    await writeSkill({
      dir: path.join(workspaceDir, "skills", "no-config"),
      name: "no-config",
      description: "No special config",
    });
    const commands = buildWorkspaceSkillCommandSpecs(workspaceDir, {
      config: {
        skills: {
          entries: {
            browser: {
              thinking: "xhigh",
              model: "anthropic/claude-opus-4-5",
            },
            "replicate-image": {
              thinking: "low",
            },
          },
        },
      },
    });
    const browserCmd = commands.find((entry) => entry.skillName === "browser");
    const replicateCmd = commands.find((entry) => entry.skillName === "replicate-image");
    const noConfigCmd = commands.find((entry) => entry.skillName === "no-config");
    expect(browserCmd?.thinking).toBe("xhigh");
    expect(browserCmd?.model).toBe("anthropic/claude-opus-4-5");
    expect(replicateCmd?.thinking).toBe("low");
    expect(replicateCmd?.model).toBeUndefined();
    expect(noConfigCmd?.thinking).toBeUndefined();
    expect(noConfigCmd?.model).toBeUndefined();
  });
});
describe("buildWorkspaceSkillsPrompt", () => {

View File

@@ -54,6 +54,8 @@ export type SkillCommandSpec = {
description: string;
/** Optional deterministic dispatch behavior for this command. */
dispatch?: SkillCommandDispatchSpec;
thinking?: "off" | "minimal" | "low" | "medium" | "high" | "xhigh";
model?: string;
};
export type SkillsInstallPreferences = {

View File

@@ -18,12 +18,13 @@ import { createSubsystemLogger } from "../../logging/subsystem.js";
import { CONFIG_DIR, resolveUserPath } from "../../utils.js";
import { resolveSandboxPath } from "../sandbox-paths.js";
import { resolveBundledSkillsDir } from "./bundled-dir.js";
import { shouldIncludeSkill } from "./config.js";
import { resolveSkillConfig, shouldIncludeSkill } from "./config.js";
import { normalizeSkillFilter } from "./filter.js";
import {
parseFrontmatter,
resolveOpenClawMetadata,
resolveSkillInvocationPolicy,
resolveSkillKey,
} from "./frontmatter.js";
import { resolvePluginSkillDirs } from "./plugin-skills.js";
import { serializeByKey } from "./serialize.js";
@@ -511,11 +512,18 @@ export function buildWorkspaceSkillCommandSpecs(
return { kind: "tool", toolName, argMode: "raw" } as const;
})();
const skillKey = resolveSkillKey(entry.skill, entry);
const skillConfig = resolveSkillConfig(opts?.config, skillKey);
const thinking = skillConfig?.thinking;
const model = skillConfig?.model;
specs.push({
name: unique,
skillName: rawName,
description,
...(dispatch ? { dispatch } : {}),
...(thinking ? { thinking } : {}),
...(model ? { model } : {}),
});
}
return specs;

View File

@@ -73,11 +73,19 @@ function buildUserIdentitySection(ownerLine: string | undefined, isMinimal: bool
return ["## User Identity", ownerLine, ""];
}
function buildTimeSection(params: { userTimezone?: string }) {
function buildTimeSection(params: { userTimezone?: string; userTime?: string }) {
if (!params.userTimezone) {
return [];
}
return ["## Current Date & Time", `Time zone: ${params.userTimezone}`, ""];
const lines = ["## Current Date & Time", `Time zone: ${params.userTimezone}`];
if (params.userTime) {
lines.push(`Current time: ${params.userTime}`);
}
lines.push(
"If you need the current date, time, or day of week, use the session_status tool.",
"",
);
return lines;
}
function buildReplyTagsSection(isMinimal: boolean) {
@@ -340,6 +348,7 @@ export function buildAgentSystemPrompt(params: {
: undefined;
const reasoningLevel = params.reasoningLevel ?? "off";
const userTimezone = params.userTimezone?.trim();
const userTime = params.userTime?.trim();
const skillsPrompt = params.skillsPrompt?.trim();
const heartbeatPrompt = params.heartbeatPrompt?.trim();
const heartbeatPromptLine = heartbeatPrompt
@@ -526,6 +535,7 @@ export function buildAgentSystemPrompt(params: {
...buildUserIdentitySection(ownerLine, isMinimal),
...buildTimeSection({
userTimezone,
userTime,
}),
"## Workspace Files (injected)",
"These user-editable files are loaded by OpenClaw and included below in Project Context.",

View File

@@ -111,8 +111,8 @@ function assertCommandRegistry(commands: ChatCommandDefinition[]): void {
}
for (const alias of command.textAliases) {
if (!alias.startsWith("/")) {
throw new Error(`Command alias missing leading '/': ${alias}`);
if (!alias.startsWith("/") && !alias.startsWith(".")) {
throw new Error(`Command alias missing leading '/' or '.': ${alias}`);
}
const aliasKey = alias.toLowerCase();
if (textAliases.has(aliasKey)) {
@@ -618,6 +618,8 @@ function buildChatCommands(): ChatCommandDefinition[] {
registerAlias(commands, "reasoning", "/reason");
registerAlias(commands, "elevated", "/elev");
registerAlias(commands, "steer", "/tell");
registerAlias(commands, "model", ".model");
registerAlias(commands, "models", ".models");
assertCommandRegistry(commands);
return commands;

View File

@@ -14,7 +14,7 @@ export function extractModelDirective(
}
const modelMatch = body.match(
/(?:^|\s)\/model(?=$|\s|:)\s*:?\s*([A-Za-z0-9_.:@-]+(?:\/[A-Za-z0-9_.:@-]+)*)?/i,
/(?:^|\s)[/.]model(?=$|\s|:)\s*:?\s*([A-Za-z0-9_.:@-]+(?:\/[A-Za-z0-9_.:@-]+)*)?/i,
);
const aliases = (options?.aliases ?? []).map((alias) => alias.trim()).filter(Boolean);
@@ -23,7 +23,7 @@ export function extractModelDirective(
? null
: body.match(
new RegExp(
`(?:^|\\s)\\/(${aliases.map(escapeRegExp).join("|")})(?=$|\\s|:)(?:\\s*:\\s*)?`,
`(?:^|\\s)[/.](${aliases.map(escapeRegExp).join("|")})(?=$|\\s|:)(?:\\s*:\\s*)?`,
"i",
),
);

View File

@@ -23,7 +23,7 @@ const matchLevelDirective = (
names: string[],
): { start: number; end: number; rawLevel?: string } | null => {
const namePattern = names.map(escapeRegExp).join("|");
const match = body.match(new RegExp(`(?:^|\\s)\\/(?:${namePattern})(?=$|\\s|:)`, "i"));
const match = body.match(new RegExp(`(?:^|\\s)[/.](?:${namePattern})(?=$|\\s|:)`, "i"));
if (!match || match.index === undefined) {
return null;
}
@@ -79,7 +79,7 @@ const extractSimpleDirective = (
): { cleaned: string; hasDirective: boolean } => {
const namePattern = names.map(escapeRegExp).join("|");
const match = body.match(
new RegExp(`(?:^|\\s)\\/(?:${namePattern})(?=$|\\s|:)(?:\\s*:\\s*)?`, "i"),
new RegExp(`(?:^|\\s)[/.](?:${namePattern})(?=$|\\s|:)(?:\\s*:\\s*)?`, "i"),
);
const cleaned = match ? body.replace(match[0], " ").replace(/\s+/g, " ").trim() : body.trim();
return {

View File

@@ -172,7 +172,7 @@ export function extractExecDirective(body?: string): ExecDirectiveParse {
invalidNode: false,
};
}
const re = /(?:^|\s)\/exec(?=$|\s|:)/i;
const re = /(?:^|\s)[/.]exec(?=$|\s|:)/i;
const match = re.exec(body);
if (!match) {
return {
@@ -185,8 +185,10 @@ export function extractExecDirective(body?: string): ExecDirectiveParse {
invalidNode: false,
};
}
const start = match.index + match[0].indexOf("/exec");
const argsStart = start + "/exec".length;
// Find the directive start (handle both /exec and .exec)
const execMatch = match[0].match(/[/.]exec/i);
const start = match.index + (execMatch ? match[0].indexOf(execMatch[0]) : 0);
const argsStart = start + 5; // "/exec" or ".exec" is always 5 chars
const parsed = parseExecDirectiveArgs(body.slice(argsStart));
const cleanedRaw = `${body.slice(0, start)} ${body.slice(argsStart + parsed.consumed)}`;
const cleaned = cleanedRaw.replace(/\s+/g, " ").trim();

View File

@@ -262,6 +262,23 @@ export async function handleInlineActions(params: {
sessionCtx.BodyForAgent = rewrittenBody;
sessionCtx.BodyStripped = rewrittenBody;
cleanedBody = rewrittenBody;
// Apply skill-level thinking/model overrides if configured
if (skillInvocation.command.thinking) {
directives = {
...directives,
hasThinkDirective: true,
thinkLevel: skillInvocation.command.thinking,
rawThinkLevel: skillInvocation.command.thinking,
};
}
if (skillInvocation.command.model) {
directives = {
...directives,
hasModelDirective: true,
rawModelDirective: skillInvocation.command.model,
};
}
}
const sendInlineReply = async (reply?: ReplyPayload) => {

View File

@@ -7,6 +7,7 @@ import {
resolveAgentSkillsFilter,
} from "../../agents/agent-scope.js";
import { resolveModelRefFromString } from "../../agents/model-selection.js";
import { compactEmbeddedPiSession } from "../../agents/pi-embedded-runner.js";
import { resolveAgentTimeoutMs } from "../../agents/timeout.js";
import { DEFAULT_AGENT_WORKSPACE_DIR, ensureAgentWorkspace } from "../../agents/workspace.js";
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
@@ -154,6 +155,7 @@ export async function getReplyFromConfig(
sessionId,
isNewSession,
resetTriggered,
compactTriggered,
systemSent,
abortedLastRun,
storePath,
@@ -293,6 +295,35 @@ export async function getReplyFromConfig(
workspaceDir,
});
// Handle compact trigger - force compaction without resetting session
if (compactTriggered && sessionEntry.sessionFile) {
try {
const compactResult = await compactEmbeddedPiSession({
sessionId: sessionEntry.sessionId,
sessionFile: sessionEntry.sessionFile,
config: cfg,
workspaceDir,
provider,
model,
});
if (compactResult.compacted && compactResult.result) {
const tokensBefore = compactResult.result.tokensBefore;
const tokensAfter = compactResult.result.tokensAfter ?? 0;
return {
text: `✅ Context compacted successfully.\n\n**Before:** ${tokensBefore.toLocaleString()} tokens\n**After:** ${tokensAfter.toLocaleString()} tokens\n**Saved:** ${(tokensBefore - tokensAfter).toLocaleString()} tokens`,
};
} else {
return {
text: ` Nothing to compact. ${compactResult.reason ?? "Session is already compact."}`,
};
}
} catch (err) {
return {
text: `❌ Compaction failed: ${String(err)}`,
};
}
}
return runPreparedReply({
ctx,
sessionCtx,

View File

@@ -44,6 +44,7 @@ export type SessionInitResult = {
sessionId: string;
isNewSession: boolean;
resetTriggered: boolean;
compactTriggered: boolean;
systemSent: boolean;
abortedLastRun: boolean;
storePath: string;
@@ -133,6 +134,7 @@ export async function initSessionState(params: {
let systemSent = false;
let abortedLastRun = false;
let resetTriggered = false;
let compactTriggered = false;
let persistedThinking: string | undefined;
let persistedVerbose: string | undefined;
@@ -198,6 +200,22 @@ export async function initSessionState(params: {
}
}
// Check for compact triggers (e.g., ".compact", "/compact")
const compactTriggers = sessionCfg?.compactTriggers ?? [];
if (!resetTriggered && resetAuthorized) {
for (const trigger of compactTriggers) {
if (!trigger) {
continue;
}
const triggerLower = trigger.toLowerCase();
if (trimmedBodyLower === triggerLower || strippedForResetLower === triggerLower) {
compactTriggered = true;
bodyStripped = "";
break;
}
}
}
sessionKey = resolveSessionKey(sessionScope, sessionCtxForState, mainKey);
const entry = sessionStore[sessionKey];
const previousSessionEntry = resetTriggered && entry ? { ...entry } : undefined;
@@ -458,6 +476,7 @@ export async function initSessionState(params: {
sessionId: sessionId ?? crypto.randomUUID(),
isNewSession,
resetTriggered,
compactTriggered,
systemSent,
abortedLastRun,
storePath,

View File

@@ -100,8 +100,9 @@ const MAX_CONSOLE_MESSAGES = 500;
const MAX_PAGE_ERRORS = 200;
const MAX_NETWORK_REQUESTS = 500;
let cached: ConnectedBrowser | null = null;
let connecting: Promise<ConnectedBrowser> | null = null;
// Per-profile caching to allow parallel connections to different Chrome instances
const cachedByUrl = new Map<string, ConnectedBrowser>();
const connectingByUrl = new Map<string, Promise<ConnectedBrowser>>();
function normalizeCdpUrl(raw: string) {
return raw.replace(/\/$/, "");
@@ -315,11 +316,17 @@ function observeBrowser(browser: Browser) {
async function connectBrowser(cdpUrl: string): Promise<ConnectedBrowser> {
const normalized = normalizeCdpUrl(cdpUrl);
if (cached?.cdpUrl === normalized) {
// Check if we already have a cached connection for this specific URL
const cached = cachedByUrl.get(normalized);
if (cached) {
return cached;
}
if (connecting) {
return await connecting;
// Check if there's already a connection in progress for this specific URL
const existingConnecting = connectingByUrl.get(normalized);
if (existingConnecting) {
return await existingConnecting;
}
const connectWithRetry = async (): Promise<ConnectedBrowser> => {
@@ -332,12 +339,12 @@ async function connectBrowser(cdpUrl: string): Promise<ConnectedBrowser> {
const headers = getHeadersWithAuth(endpoint);
const browser = await chromium.connectOverCDP(endpoint, { timeout, headers });
const onDisconnected = () => {
if (cached?.browser === browser) {
cached = null;
if (cachedByUrl.get(normalized)?.browser === browser) {
cachedByUrl.delete(normalized);
}
};
const connected: ConnectedBrowser = { browser, cdpUrl: normalized, onDisconnected };
cached = connected;
cachedByUrl.set(normalized, connected);
browser.on("disconnected", onDisconnected);
observeBrowser(browser);
return connected;
@@ -354,11 +361,12 @@ async function connectBrowser(cdpUrl: string): Promise<ConnectedBrowser> {
throw new Error(message);
};
connecting = connectWithRetry().finally(() => {
connecting = null;
const connectingPromise = connectWithRetry().finally(() => {
connectingByUrl.delete(normalized);
});
connectingByUrl.set(normalized, connectingPromise);
return await connecting;
return await connectingPromise;
}
async function getAllPages(browser: Browser): Promise<Page[]> {
@@ -512,16 +520,16 @@ export function refLocator(page: Page, ref: string) {
}
export async function closePlaywrightBrowserConnection(): Promise<void> {
const cur = cached;
cached = null;
connecting = null;
if (!cur) {
return;
// Close all cached browser connections
const connections = Array.from(cachedByUrl.values());
cachedByUrl.clear();
connectingByUrl.clear();
for (const c of connections) {
if (c.onDisconnected && typeof c.browser.off === "function") {
c.browser.off("disconnected", c.onDisconnected);
}
}
if (cur.onDisconnected && typeof cur.browser.off === "function") {
cur.browser.off("disconnected", cur.onDisconnected);
}
await cur.browser.close().catch(() => {});
await Promise.all(connections.map((c) => c.browser.close().catch(() => {})));
}
function normalizeCdpHttpBaseForJsonEndpoints(cdpUrl: string): string {
@@ -649,31 +657,30 @@ export async function forceDisconnectPlaywrightForTarget(opts: {
reason?: string;
}): Promise<void> {
const normalized = normalizeCdpUrl(opts.cdpUrl);
if (cached?.cdpUrl !== normalized) {
const cur = cachedByUrl.get(normalized);
if (!cur) {
return;
}
const cur = cached;
cached = null;
// Also clear `connecting` so the next call does a fresh connectOverCDP
cachedByUrl.delete(normalized);
// Also clear the connecting promise so the next call does a fresh connectOverCDP
// rather than awaiting a stale promise.
connecting = null;
if (cur) {
// Remove the "disconnected" listener to prevent the old browser's teardown
// from racing with a fresh connection and nulling the new `cached`.
if (cur.onDisconnected && typeof cur.browser.off === "function") {
cur.browser.off("disconnected", cur.onDisconnected);
}
connectingByUrl.delete(normalized);
// Best-effort: kill any stuck JS to unblock the target's execution context before we
// disconnect Playwright's CDP connection.
const targetId = opts.targetId?.trim() || "";
if (targetId) {
await tryTerminateExecutionViaCdp({ cdpUrl: normalized, targetId }).catch(() => {});
}
// Fire-and-forget: don't await because browser.close() may hang on the stuck CDP pipe.
cur.browser.close().catch(() => {});
// Remove the "disconnected" listener to prevent the old browser's teardown
// from racing with a fresh connection and clearing the new cached entry.
if (cur.onDisconnected && typeof cur.browser.off === "function") {
cur.browser.off("disconnected", cur.onDisconnected);
}
// Best-effort: kill any stuck JS to unblock the target's execution context before we
// disconnect Playwright's CDP connection.
const targetId = opts.targetId?.trim() || "";
if (targetId) {
await tryTerminateExecutionViaCdp({ cdpUrl: normalized, targetId }).catch(() => {});
}
// Fire-and-forget: don't await because browser.close() may hang on the stuck CDP pipe.
cur.browser.close().catch(() => {});
}
/**

View File

@@ -1,4 +1,4 @@
import { resolveCommitHash } from "../infra/git-commit.js";
import { resolveCommitHash, resolveUpstreamCommitHash } from "../infra/git-commit.js";
import { visibleWidth } from "../terminal/ansi.js";
import { isRich, theme } from "../terminal/theme.js";
import { pickTagline, type TaglineOptions } from "./tagline.js";
@@ -6,6 +6,7 @@ import { pickTagline, type TaglineOptions } from "./tagline.js";
type BannerOptions = TaglineOptions & {
argv?: string[];
commit?: string | null;
upstreamCommit?: string | null;
columns?: number;
richTty?: boolean;
};
@@ -36,30 +37,33 @@ const hasVersionFlag = (argv: string[]) =>
export function formatCliBannerLine(version: string, options: BannerOptions = {}): string {
const commit = options.commit ?? resolveCommitHash({ env: options.env });
const upstreamCommit = options.upstreamCommit ?? resolveUpstreamCommitHash();
const commitLabel = commit ?? "unknown";
// Show upstream if different from current (indicates local commits ahead)
const showUpstream = upstreamCommit && upstreamCommit !== commit;
const commitDisplay = showUpstream ? `${commitLabel}${upstreamCommit}` : commitLabel;
const tagline = pickTagline(options);
const rich = options.richTty ?? isRich();
const title = "🦞 OpenClaw";
const prefix = "🦞 ";
const columns = options.columns ?? process.stdout.columns ?? 120;
const plainFullLine = `${title} ${version} (${commitLabel}) — ${tagline}`;
const plainFullLine = `${title} ${version} (${commitDisplay}) — ${tagline}`;
const fitsOnOneLine = visibleWidth(plainFullLine) <= columns;
if (rich) {
const commitPart = showUpstream
? `${theme.muted("(")}${commitLabel}${theme.muted(" ← ")}${theme.muted(upstreamCommit)}${theme.muted(")")}`
: theme.muted(`(${commitLabel})`);
if (fitsOnOneLine) {
return `${theme.heading(title)} ${theme.info(version)} ${theme.muted(
`(${commitLabel})`,
)} ${theme.muted("—")} ${theme.accentDim(tagline)}`;
return `${theme.heading(title)} ${theme.info(version)} ${commitPart} ${theme.muted("—")} ${theme.accentDim(tagline)}`;
}
const line1 = `${theme.heading(title)} ${theme.info(version)} ${theme.muted(
`(${commitLabel})`,
)}`;
const line1 = `${theme.heading(title)} ${theme.info(version)} ${commitPart}`;
const line2 = `${" ".repeat(prefix.length)}${theme.accentDim(tagline)}`;
return `${line1}\n${line2}`;
}
if (fitsOnOneLine) {
return plainFullLine;
}
const line1 = `${title} ${version} (${commitLabel})`;
const line1 = `${title} ${version} (${commitDisplay})`;
const line2 = `${" ".repeat(prefix.length)}${tagline}`;
return `${line1}\n${line2}`;
}

View File

@@ -280,11 +280,14 @@ export async function runMemoryStatus(opts: MemoryCommandOptions) {
scan?: MemorySourceScan;
}> = [];
const disabledAgentIds: string[] = [];
for (const agentId of agentIds) {
const managerPurpose = opts.index ? "default" : "status";
await withManager<MemoryManager>({
getManager: () => getMemorySearchManager({ cfg, agentId, purpose: managerPurpose }),
onMissing: (error) => defaultRuntime.log(error ?? "Memory search disabled."),
onMissing: () => {
disabledAgentIds.push(agentId);
},
onCloseError: (err) =>
defaultRuntime.error(`Memory manager close failed: ${formatErrorMessage(err)}`),
close: async (manager) => {
@@ -374,11 +377,22 @@ export async function runMemoryStatus(opts: MemoryCommandOptions) {
const accent = (text: string) => colorize(rich, theme.accent, text);
const label = (text: string) => muted(`${text}:`);
const emptyAgentIds: string[] = [];
for (const result of allResults) {
const { agentId, status, embeddingProbe, indexError, scan } = result;
const filesIndexed = status.files ?? 0;
const chunksIndexed = status.chunks ?? 0;
const totalFiles = scan?.totalFiles ?? null;
// Skip agents with no indexed content (0 files, 0 chunks, no source files, no errors).
// These agents aren't using the core memory search system — no need to show them.
const isEmpty =
status.files === 0 && status.chunks === 0 && (totalFiles ?? 0) === 0 && !indexError;
if (isEmpty) {
emptyAgentIds.push(agentId);
continue;
}
const indexedLabel =
totalFiles === null
? `${filesIndexed}/? files · ${chunksIndexed} chunks`
@@ -510,6 +524,28 @@ export async function runMemoryStatus(opts: MemoryCommandOptions) {
defaultRuntime.log(lines.join("\n"));
defaultRuntime.log("");
}
// Show compact summary for agents with no indexed memory-search content
if (emptyAgentIds.length > 0) {
const agentList = emptyAgentIds.join(", ");
defaultRuntime.log(
muted(
`Memory Search: ${emptyAgentIds.length} agent${emptyAgentIds.length > 1 ? "s" : ""} with no indexed files (${agentList})`,
),
);
defaultRuntime.log("");
}
// Show compact summary for agents with memory search disabled
if (disabledAgentIds.length > 0 && emptyAgentIds.length === 0) {
const agentList = disabledAgentIds.join(", ");
defaultRuntime.log(
muted(
`Memory Search: disabled for ${disabledAgentIds.length} agent${disabledAgentIds.length > 1 ? "s" : ""} (${agentList})`,
),
);
defaultRuntime.log("");
}
}
export function registerMemoryCli(program: Command) {

View File

@@ -313,9 +313,10 @@ export async function statusCommand(
}
if (!memory) {
const slot = memoryPlugin.slot ? `plugin ${memoryPlugin.slot}` : "plugin";
// Custom (non-built-in) memory plugins can't be probed — show enabled, not unavailable
// External plugins (non memory-core) don't have detailed status available,
// but that doesn't mean they're unavailable - just that we can't query them here
if (memoryPlugin.slot && memoryPlugin.slot !== "memory-core") {
return `enabled (${slot})`;
return muted(`enabled (${slot})`);
}
return muted(`enabled (${slot}) · unavailable`);
}

View File

@@ -0,0 +1,143 @@
/**
* Per-session metadata storage to eliminate lock contention.
*
* Instead of storing all session metadata in a single sessions.json file
* (which requires a global lock), each session gets its own .meta.json file.
* This allows parallel updates without blocking.
*/
import JSON5 from "json5";
import fs from "node:fs/promises";
import path from "node:path";
import type { SessionEntry } from "./types.js";
import { resolveSessionTranscriptsDirForAgent } from "./paths.js";
const META_SUFFIX = ".meta.json";
/**
 * Build the absolute path of a session's `.meta.json` metadata file.
 *
 * @param sessionId - Session identifier; used as the file's base name.
 * @param agentId - Optional agent whose transcripts directory is used.
 * @returns Path of the form `<sessions-dir>/<sessionId>.meta.json`.
 */
export function getSessionMetaPath(sessionId: string, agentId?: string): string {
  const dir = resolveSessionTranscriptsDirForAgent(agentId);
  const fileName = `${sessionId}${META_SUFFIX}`;
  return path.join(dir, fileName);
}
/**
 * Read a session's metadata from its per-session file.
 *
 * @param sessionId - Session whose metadata to load.
 * @param agentId - Optional agent scoping the sessions directory.
 * @returns The parsed entry, or `undefined` when no meta file exists.
 * @throws Any read error other than ENOENT, and JSON5 parse errors.
 */
export async function loadSessionMeta(
  sessionId: string,
  agentId?: string,
): Promise<SessionEntry | undefined> {
  const filePath = getSessionMetaPath(sessionId, agentId);
  let raw: string;
  try {
    raw = await fs.readFile(filePath, "utf-8");
  } catch (err) {
    // A missing meta file simply means the session has no stored metadata.
    if ((err as { code?: string }).code === "ENOENT") {
      return undefined;
    }
    throw err;
  }
  return JSON5.parse(raw);
}
/**
 * Persist a session's metadata to its per-session file.
 *
 * Writes to a temp file in the same directory and renames it into place,
 * so readers never observe a partially written file.
 *
 * @param sessionId - Session whose metadata to persist.
 * @param entry - Full metadata entry to write.
 * @param agentId - Optional agent scoping the sessions directory.
 */
export async function saveSessionMeta(
  sessionId: string,
  entry: SessionEntry,
  agentId?: string,
): Promise<void> {
  const metaPath = getSessionMetaPath(sessionId, agentId);
  await fs.mkdir(path.dirname(metaPath), { recursive: true });
  const serialized = JSON.stringify(entry, null, 2);
  // Unique scratch name per process + timestamp; rename makes the write atomic.
  const scratchPath = `${metaPath}.tmp.${process.pid}.${Date.now()}`;
  try {
    await fs.writeFile(scratchPath, serialized, "utf-8");
    await fs.rename(scratchPath, metaPath);
  } catch (err) {
    // Best-effort removal of the scratch file; the original error wins.
    await fs.unlink(scratchPath).catch(() => {});
    throw err;
  }
}
/**
 * Apply a partial patch to a session's metadata and persist the result.
 *
 * Read-modify-write against the per-session file; safe without a lock
 * because writes are atomic and each session owns its own file.
 *
 * @param sessionId - Session to update; always written into the result.
 * @param patch - Fields to overwrite on the existing entry.
 * @param agentId - Optional agent scoping the sessions directory.
 * @returns The merged entry that was written, with a fresh `updatedAt`.
 */
export async function updateSessionMeta(
  sessionId: string,
  patch: Partial<SessionEntry>,
  agentId?: string,
): Promise<SessionEntry> {
  const current = await loadSessionMeta(sessionId, agentId);
  const merged: SessionEntry = {
    ...current,
    ...patch,
    sessionId,
    updatedAt: Date.now(),
  };
  await saveSessionMeta(sessionId, merged, agentId);
  return merged;
}
/**
 * Remove a session's metadata file, treating "already gone" as success.
 *
 * @param sessionId - Session whose meta file to delete.
 * @param agentId - Optional agent scoping the sessions directory.
 * @throws Any unlink error other than ENOENT.
 */
export async function deleteSessionMeta(sessionId: string, agentId?: string): Promise<void> {
  try {
    await fs.unlink(getSessionMetaPath(sessionId, agentId));
  } catch (err) {
    if ((err as { code?: string }).code === "ENOENT") {
      return;
    }
    throw err;
  }
}
/**
 * Enumerate the session IDs that have per-session metadata files.
 *
 * @param agentId - Optional agent scoping the sessions directory.
 * @returns Session IDs derived from `*.meta.json` file names; empty when
 *   the sessions directory does not exist yet.
 */
export async function listSessionMetas(agentId?: string): Promise<string[]> {
  const dir = resolveSessionTranscriptsDirForAgent(agentId);
  let names: string[];
  try {
    names = await fs.readdir(dir);
  } catch (err) {
    // No directory means no sessions have been recorded yet.
    if ((err as { code?: string }).code === "ENOENT") {
      return [];
    }
    throw err;
  }
  const ids: string[] = [];
  for (const name of names) {
    if (name.endsWith(META_SUFFIX)) {
      ids.push(name.slice(0, name.length - META_SUFFIX.length));
    }
  }
  return ids;
}
/**
 * Load every per-session metadata entry, keyed by session ID.
 *
 * Used for backwards compatibility and to rebuild the session index.
 * Entries whose files vanish between listing and reading are skipped.
 *
 * @param agentId - Optional agent scoping the sessions directory.
 * @returns Map of sessionId to its loaded entry.
 */
export async function loadAllSessionMetas(agentId?: string): Promise<Record<string, SessionEntry>> {
  const ids = await listSessionMetas(agentId);
  const loaded = await Promise.all(
    ids.map(async (id) => [id, await loadSessionMeta(id, agentId)] as const),
  );
  const result: Record<string, SessionEntry> = {};
  for (const [id, entry] of loaded) {
    if (entry) {
      result[id] = entry;
    }
  }
  return result;
}

View File

@@ -763,21 +763,112 @@ export async function updateSessionStoreEntry(params: {
update: (entry: SessionEntry) => Promise<Partial<SessionEntry> | null>;
}): Promise<SessionEntry | null> {
const { storePath, sessionKey, update } = params;
return await withSessionStoreLock(storePath, async () => {
const store = loadSessionStore(storePath);
const existing = store[sessionKey];
if (!existing) {
return null;
// Fast path: read the store without locking to get the session entry
// The store is cached and TTL-validated, so this is cheap
const store = loadSessionStore(storePath);
const existing = store[sessionKey];
if (!existing) {
return null;
}
// Get the sessionId for per-session file access
const sessionId = existing.sessionId;
if (!sessionId) {
// Fallback to locked update for legacy entries without sessionId
return await withSessionStoreLock(storePath, async () => {
const freshStore = loadSessionStore(storePath, { skipCache: true });
const freshExisting = freshStore[sessionKey];
if (!freshExisting) {
return null;
}
const patch = await update(freshExisting);
if (!patch) {
return freshExisting;
}
const next = mergeSessionEntry(freshExisting, patch);
freshStore[sessionKey] = next;
await saveSessionStoreUnlocked(storePath, freshStore);
return next;
});
}
// Compute the patch
const patch = await update(existing);
if (!patch) {
return existing;
}
// Merge and create the updated entry
const next = mergeSessionEntry(existing, patch);
// Write to per-session meta file (no global lock needed)
const { updateSessionMeta } = await import("./per-session-store.js");
const agentId = extractAgentIdFromStorePath(storePath);
await updateSessionMeta(sessionId, next, agentId);
// Update the in-memory cache so subsequent reads see the update
store[sessionKey] = next;
invalidateSessionStoreCache(storePath);
// Async background sync to sessions.json (debounced, best-effort)
debouncedSyncToSessionsJson(storePath, sessionKey, next);
return next;
}
/**
 * Extract the agentId segment from a session-store path.
 *
 * storePath is like: ~/.openclaw/agents/{agentId}/sessions/sessions.json
 * Accepts both POSIX (`/`) and Windows (`\`) separators — `path.join` emits
 * backslashes on Windows, and a miss here would silently route per-session
 * meta files to the default agent directory.
 *
 * @param storePath - Absolute path of a sessions.json store file.
 * @returns The agentId, or undefined when the path has no agents segment.
 */
function extractAgentIdFromStorePath(storePath: string): string | undefined {
  const match = storePath.match(/agents[\\/]([^\\/]+)[\\/]sessions/);
  return match?.[1];
}
// Debounced sync to sessions.json to keep it in sync (background, best-effort)
const pendingSyncs = new Map<string, { sessionKey: string; entry: SessionEntry }>();
let syncTimer: NodeJS.Timeout | null = null;
function debouncedSyncToSessionsJson(
storePath: string,
sessionKey: string,
entry: SessionEntry,
): void {
const key = `${storePath}::${sessionKey}`;
pendingSyncs.set(key, { sessionKey, entry });
if (syncTimer) {
return;
} // Already scheduled
syncTimer = setTimeout(async () => {
syncTimer = null;
const toSync = new Map(pendingSyncs);
pendingSyncs.clear();
// Group by storePath
const byStore = new Map<string, Array<{ sessionKey: string; entry: SessionEntry }>>();
for (const [key, value] of toSync) {
const [sp] = key.split("::");
const list = byStore.get(sp) ?? [];
list.push(value);
byStore.set(sp, list);
}
const patch = await update(existing);
if (!patch) {
return existing;
// Batch update each store
for (const [sp, entries] of byStore) {
try {
await withSessionStoreLock(sp, async () => {
const store = loadSessionStore(sp, { skipCache: true });
for (const { sessionKey: sk, entry: e } of entries) {
store[sk] = e;
}
await saveSessionStoreUnlocked(sp, store);
});
} catch {
// Best-effort sync, ignore errors
}
}
const next = mergeSessionEntry(existing, patch);
store[sessionKey] = next;
await saveSessionStoreUnlocked(storePath, store, { activeSessionKey: sessionKey });
return next;
});
}, 5000); // 5 second debounce
}
export async function recordSessionMetaFromInbound(params: {

View File

@@ -91,6 +91,7 @@ export type SessionConfig = {
/** Map platform-prefixed identities (e.g. "telegram:123") to canonical DM peers. */
identityLinks?: Record<string, string[]>;
resetTriggers?: string[];
compactTriggers?: string[];
idleMinutes?: number;
reset?: SessionResetConfig;
resetByType?: SessionResetByTypeConfig;

View File

@@ -3,6 +3,8 @@ export type SkillConfig = {
apiKey?: string;
env?: Record<string, string>;
config?: Record<string, unknown>;
thinking?: "off" | "minimal" | "low" | "medium" | "high" | "xhigh";
model?: string;
};
export type SkillsLoadConfig = {

View File

@@ -34,6 +34,7 @@ export const SessionSchema = z
.optional(),
identityLinks: z.record(z.string(), z.array(z.string())).optional(),
resetTriggers: z.array(z.string()).optional(),
compactTriggers: z.array(z.string()).optional(),
idleMinutes: z.number().int().positive().optional(),
reset: SessionResetConfigSchema.optional(),
resetByType: z

View File

@@ -588,6 +588,8 @@ export const OpenClawSchema = z
apiKey: z.string().optional().register(sensitive),
env: z.record(z.string(), z.string()).optional(),
config: z.record(z.string(), z.unknown()).optional(),
thinking: z.enum(["off", "minimal", "low", "medium", "high", "xhigh"]).optional(),
model: z.string().optional(),
})
.strict(),
)

View File

@@ -368,6 +368,9 @@ export function normalizeCronJobInput(
stripLegacyTopLevelFields(next);
if (options.applyDefaults) {
if (typeof next.enabled !== "boolean") {
next.enabled = true;
}
if (!next.wakeMode) {
next.wakeMode = "now";
}

View File

@@ -202,11 +202,21 @@ export async function run(state: CronServiceState, id: string, mode?: "due" | "f
return { ok: true, ran: false, reason: "already-running" as const };
}
const now = state.deps.nowMs();
const due = isJobDue(job, now, { forced: mode === "force" });
const forced = mode === "force";
const due = isJobDue(job, now, { forced });
if (!due) {
return { ok: true, ran: false, reason: "not-due" as const };
}
await executeJob(state, job, now, { forced: mode === "force" });
if (forced) {
// Fire-and-forget: don't block the caller waiting for job completion
void executeJob(state, job, now, { forced }).then(() => {
recomputeNextRuns(state);
persist(state).catch(() => {});
armTimer(state);
});
return { ok: true, ran: true } as const;
}
await executeJob(state, job, now, { forced });
recomputeNextRuns(state);
await persist(state);
armTimer(state);

View File

@@ -59,11 +59,14 @@ The hook uses your configured LLM provider to generate slugs, so it works with a
The hook supports optional configuration:
| Option | Type | Default | Description |
| ---------- | ------ | ------- | --------------------------------------------------------------- |
| `messages` | number | 15 | Number of user/assistant messages to include in the memory file |
| Option | Type | Default | Description |
| ---------- | ------ | -------- | ------------------------------------------------------------------------ |
| `messages` | number | 15 | Number of user/assistant messages to include in the memory |
| `target` | string | `"file"` | Storage target: `"file"` (markdown files) or `"lancedb"` (LanceDB store) |
Example configuration:
### File Target (Default)
Saves session context to markdown files in `<workspace>/memory/`:
```json
{
@@ -72,6 +75,7 @@ Example configuration:
"entries": {
"session-memory": {
"enabled": true,
"target": "file",
"messages": 25
}
}
@@ -80,11 +84,41 @@ Example configuration:
}
```
### LanceDB Target
Stores session summaries in LanceDB via the Gateway API instead of creating files:
```json
{
"hooks": {
"internal": {
"entries": {
"session-memory": {
"enabled": true,
"target": "lancedb",
"messages": 15
}
}
}
}
}
**LanceDB target features:**
- Stores session context as searchable memory entries
- Automatically truncates conversation content to 2000 chars
- Includes date, time, session key, and LLM-generated slug
- Category: `"fact"`, Importance: `0.7`
- Requires Gateway API with `memory_store` tool available
- Use `memory_recall` to search through stored sessions
The hook automatically:
- Uses your workspace directory (`~/.openclaw/workspace` by default)
- Uses your workspace directory (`~/.openclaw/workspace` by default) for file target
- Uses your configured LLM for slug generation
- Falls back to timestamp slugs if LLM is unavailable
- Uses Gateway API at `localhost:<gateway.port>` with `gateway.auth.token` for LanceDB target
## Disabling

View File

@@ -1,6 +1,6 @@
import fs from "node:fs/promises";
import path from "node:path";
import { beforeAll, describe, expect, it, vi } from "vitest";
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../../../config/config.js";
import type { HookHandler } from "../../hooks.js";
import { makeTempWorkspace, writeWorkspaceFile } from "../../../test-helpers/workspace.js";
@@ -273,4 +273,263 @@ describe("session-memory hook", () => {
expect(memoryContent).toContain("user: Only message 1");
expect(memoryContent).toContain("assistant: Only message 2");
});
describe("LanceDB target", () => {
let originalFetch: typeof global.fetch;
let fetchCalls: Array<{ url: string; options: RequestInit }>;
beforeEach(() => {
// Mock fetch
fetchCalls = [];
originalFetch = global.fetch;
global.fetch = async (url: string | URL, options?: RequestInit) => {
fetchCalls.push({ url: url.toString(), options: options || {} });
return new Response(JSON.stringify({ success: true }), {
status: 200,
headers: { "Content-Type": "application/json" },
});
};
});
afterEach(() => {
global.fetch = originalFetch;
});
it("calls Gateway API with memory_store when target is lancedb", async () => {
const tempDir = await makeTempWorkspace("openclaw-session-memory-");
const sessionsDir = path.join(tempDir, "sessions");
await fs.mkdir(sessionsDir, { recursive: true });
const sessionContent = createMockSessionContent([
{ role: "user", content: "Test question" },
{ role: "assistant", content: "Test answer" },
]);
const sessionFile = await writeWorkspaceFile({
dir: sessionsDir,
name: "test-session.jsonl",
content: sessionContent,
});
const cfg: OpenClawConfig = {
agents: { defaults: { workspace: tempDir } },
gateway: {
port: 18789,
auth: { token: "test-token-123" },
},
hooks: {
internal: {
entries: {
"session-memory": { enabled: true, target: "lancedb" },
},
},
},
};
const event = createHookEvent("command", "new", "agent:main:main", {
cfg,
previousSessionEntry: {
sessionId: "test-123",
sessionFile,
},
});
await handler(event);
// Verify fetch was called
expect(fetchCalls.length).toBe(1);
expect(fetchCalls[0].url).toBe("http://localhost:18789/tools/invoke");
// Verify headers
const headers = fetchCalls[0].options.headers as Record<string, string>;
expect(headers.Authorization).toBe("Bearer test-token-123");
expect(headers["Content-Type"]).toBe("application/json");
// Verify body
const body = JSON.parse(fetchCalls[0].options.body as string);
expect(body.tool).toBe("memory_store");
expect(body.args.category).toBe("fact");
expect(body.args.importance).toBe(0.7);
expect(body.args.text).toContain("user: Test question");
expect(body.args.text).toContain("assistant: Test answer");
});
it("truncates content to 2000 chars for lancedb", async () => {
const tempDir = await makeTempWorkspace("openclaw-session-memory-");
const sessionsDir = path.join(tempDir, "sessions");
await fs.mkdir(sessionsDir, { recursive: true });
// Create a long message
const longContent = "A".repeat(2500);
const sessionContent = createMockSessionContent([{ role: "user", content: longContent }]);
const sessionFile = await writeWorkspaceFile({
dir: sessionsDir,
name: "test-session.jsonl",
content: sessionContent,
});
const cfg: OpenClawConfig = {
agents: { defaults: { workspace: tempDir } },
gateway: {
port: 18789,
auth: { token: "test-token" },
},
hooks: {
internal: {
entries: {
"session-memory": { enabled: true, target: "lancedb" },
},
},
},
};
const event = createHookEvent("command", "new", "agent:main:main", {
cfg,
previousSessionEntry: {
sessionId: "test-123",
sessionFile,
},
});
await handler(event);
// Verify content was truncated
const body = JSON.parse(fetchCalls[0].options.body as string);
expect(body.args.text).toContain("[...truncated to 2000 chars]");
// Full content would be > 2000, but text should be <= 2000 + metadata + truncation notice
const contentLines = body.args.text.split("\n");
const conversationStart = contentLines.findIndex((line: string) => line.includes("user:"));
const conversationText = contentLines.slice(conversationStart).join("\n");
// The conversation part should be truncated at 2000 chars
expect(conversationText.length).toBeLessThan(2100); // 2000 + some overhead for truncation message
});
it("does not create memory file when target is lancedb", async () => {
const tempDir = await makeTempWorkspace("openclaw-session-memory-");
const sessionsDir = path.join(tempDir, "sessions");
await fs.mkdir(sessionsDir, { recursive: true });
const sessionContent = createMockSessionContent([{ role: "user", content: "Test" }]);
const sessionFile = await writeWorkspaceFile({
dir: sessionsDir,
name: "test-session.jsonl",
content: sessionContent,
});
const cfg: OpenClawConfig = {
agents: { defaults: { workspace: tempDir } },
gateway: {
port: 18789,
auth: { token: "test-token" },
},
hooks: {
internal: {
entries: {
"session-memory": { enabled: true, target: "lancedb" },
},
},
},
};
const event = createHookEvent("command", "new", "agent:main:main", {
cfg,
previousSessionEntry: {
sessionId: "test-123",
sessionFile,
},
});
await handler(event);
// Memory directory should not be created
const memoryDir = path.join(tempDir, "memory");
await expect(fs.access(memoryDir)).rejects.toThrow();
});
it("handles Gateway API errors gracefully", async () => {
const tempDir = await makeTempWorkspace("openclaw-session-memory-");
const sessionsDir = path.join(tempDir, "sessions");
await fs.mkdir(sessionsDir, { recursive: true });
const sessionContent = createMockSessionContent([{ role: "user", content: "Test" }]);
const sessionFile = await writeWorkspaceFile({
dir: sessionsDir,
name: "test-session.jsonl",
content: sessionContent,
});
// Mock fetch to return error
global.fetch = async () => {
return new Response("Gateway error", { status: 500 });
};
const cfg: OpenClawConfig = {
agents: { defaults: { workspace: tempDir } },
gateway: {
port: 18789,
auth: { token: "test-token" },
},
hooks: {
internal: {
entries: {
"session-memory": { enabled: true, target: "lancedb" },
},
},
},
};
const event = createHookEvent("command", "new", "agent:main:main", {
cfg,
previousSessionEntry: {
sessionId: "test-123",
sessionFile,
},
});
// Should not throw - errors are logged and caught
await expect(handler(event)).resolves.toBeUndefined();
});
it("defaults to file target when target config is invalid", async () => {
const tempDir = await makeTempWorkspace("openclaw-session-memory-");
const sessionsDir = path.join(tempDir, "sessions");
await fs.mkdir(sessionsDir, { recursive: true });
const sessionContent = createMockSessionContent([{ role: "user", content: "Test" }]);
const sessionFile = await writeWorkspaceFile({
dir: sessionsDir,
name: "test-session.jsonl",
content: sessionContent,
});
const cfg: OpenClawConfig = {
agents: { defaults: { workspace: tempDir } },
hooks: {
internal: {
entries: {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
"session-memory": { enabled: true, target: "invalid" as any },
},
},
},
};
const event = createHookEvent("command", "new", "agent:main:main", {
cfg,
previousSessionEntry: {
sessionId: "test-123",
sessionFile,
},
});
await handler(event);
// Should fall back to file target
const memoryDir = path.join(tempDir, "memory");
const files = await fs.readdir(memoryDir);
expect(files.length).toBe(1);
// Fetch should not have been called
expect(fetchCalls.length).toBe(0);
});
});
});

View File

@@ -67,6 +67,67 @@ async function getRecentSessionContent(
}
}
/**
 * Persist a session summary to LanceDB by invoking the Gateway's
 * `memory_store` tool over its local HTTP API.
 *
 * The stored text is a small metadata header (slug, UTC date/time, session
 * key) followed by the conversation content capped at 2000 characters.
 *
 * @throws If the gateway auth token is absent from config, or if the
 *   Gateway HTTP call returns a non-2xx status.
 */
async function saveToLanceDB(params: {
  cfg: OpenClawConfig;
  sessionKey: string;
  slug: string;
  sessionContent: string;
  timestamp: Date;
}): Promise<void> {
  const { cfg, sessionKey, slug, sessionContent, timestamp } = params;

  // Resolve gateway connection details from config.
  const port = cfg.gateway?.port || 18789;
  const token = cfg.gateway?.auth?.token;
  if (!token) {
    throw new Error("Gateway auth token not found in config");
  }

  // Split the ISO timestamp into a date part and an HH:MM:SS time part.
  const iso = timestamp.toISOString();
  const datePart = iso.split("T")[0];
  const timePart = iso.split("T")[1].split(".")[0];

  // Cap the conversation at 2000 characters and note whether anything was cut.
  const clipped = sessionContent.slice(0, 2000);
  const overflowed = sessionContent.length > 2000;

  const memoryText = [
    `Session: ${slug}`,
    `Date: ${datePart} ${timePart} UTC`,
    `Session Key: ${sessionKey}`,
    "",
    clipped,
    overflowed ? "\n[...truncated to 2000 chars]" : "",
  ].join("\n");

  // Invoke the memory_store tool through the Gateway HTTP API.
  const response = await fetch(`http://localhost:${port}/tools/invoke`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${token}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      tool: "memory_store",
      args: {
        text: memoryText,
        importance: 0.7,
        category: "fact",
      },
    }),
  });

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Gateway API call failed: ${response.status} ${errorText}`);
  }
  log.debug("Successfully stored to LanceDB via Gateway API");
}
/**
* Save session context to memory when /new command is triggered
*/
@@ -85,8 +146,6 @@ const saveSessionToMemory: HookHandler = async (event) => {
const workspaceDir = cfg
? resolveAgentWorkspaceDir(cfg, agentId)
: path.join(resolveStateDir(process.env, os.homedir), "workspace");
const memoryDir = path.join(workspaceDir, "memory");
await fs.mkdir(memoryDir, { recursive: true });
// Get today's date for filename
const now = new Date(event.timestamp);
@@ -108,12 +167,15 @@ const saveSessionToMemory: HookHandler = async (event) => {
const sessionFile = currentSessionFile || undefined;
// Read message count from hook config (default: 15)
// Read hook config (default: 15 messages, file target)
const hookConfig = resolveHookConfig(cfg, "session-memory");
const messageCount =
typeof hookConfig?.messages === "number" && hookConfig.messages > 0
? hookConfig.messages
: 15;
const target = hookConfig?.target === "lancedb" ? "lancedb" : "file";
log.debug("Storage target resolved", { target });
let slug: string | null = null;
let sessionContent: string | null = null;
@@ -149,45 +211,70 @@ const saveSessionToMemory: HookHandler = async (event) => {
log.debug("Using fallback timestamp slug", { slug });
}
// Create filename with date and slug
const filename = `${dateStr}-${slug}.md`;
const memoryFilePath = path.join(memoryDir, filename);
log.debug("Memory file path resolved", {
filename,
path: memoryFilePath.replace(os.homedir(), "~"),
});
// Route to appropriate storage target
if (target === "lancedb") {
// Store in LanceDB via Gateway API
if (!cfg) {
throw new Error("Config not available for LanceDB storage");
}
if (!sessionContent) {
log.debug("No session content available, skipping LanceDB storage");
return;
}
// Format time as HH:MM:SS UTC
const timeStr = now.toISOString().split("T")[1].split(".")[0];
await saveToLanceDB({
cfg,
sessionKey: event.sessionKey,
slug,
sessionContent,
timestamp: now,
});
log.info(`Session context stored in LanceDB: ${slug}`);
} else {
// Store in file (default behavior)
const memoryDir = path.join(workspaceDir, "memory");
await fs.mkdir(memoryDir, { recursive: true });
// Extract context details
const sessionId = (sessionEntry.sessionId as string) || "unknown";
const source = (context.commandSource as string) || "unknown";
// Create filename with date and slug
const filename = `${dateStr}-${slug}.md`;
const memoryFilePath = path.join(memoryDir, filename);
log.debug("Memory file path resolved", {
filename,
path: memoryFilePath.replace(os.homedir(), "~"),
});
// Build Markdown entry
const entryParts = [
`# Session: ${dateStr} ${timeStr} UTC`,
"",
`- **Session Key**: ${event.sessionKey}`,
`- **Session ID**: ${sessionId}`,
`- **Source**: ${source}`,
"",
];
// Format time as HH:MM:SS UTC
const timeStr = now.toISOString().split("T")[1].split(".")[0];
// Include conversation content if available
if (sessionContent) {
entryParts.push("## Conversation Summary", "", sessionContent, "");
// Extract context details
const sessionId = (sessionEntry.sessionId as string) || "unknown";
const source = (context.commandSource as string) || "unknown";
// Build Markdown entry
const entryParts = [
`# Session: ${dateStr} ${timeStr} UTC`,
"",
`- **Session Key**: ${event.sessionKey}`,
`- **Session ID**: ${sessionId}`,
`- **Source**: ${source}`,
"",
];
// Include conversation content if available
if (sessionContent) {
entryParts.push("## Conversation Summary", "", sessionContent, "");
}
const entry = entryParts.join("\n");
// Write to new memory file
await fs.writeFile(memoryFilePath, entry, "utf-8");
log.debug("Memory file written successfully");
// Log completion (but don't send user-visible confirmation - it's internal housekeeping)
const relPath = memoryFilePath.replace(os.homedir(), "~");
log.info(`Session context saved to ${relPath}`);
}
const entry = entryParts.join("\n");
// Write to new memory file
await fs.writeFile(memoryFilePath, entry, "utf-8");
log.debug("Memory file written successfully");
// Log completion (but don't send user-visible confirmation - it's internal housekeeping)
const relPath = memoryFilePath.replace(os.homedir(), "~");
log.info(`Session context saved to ${relPath}`);
} catch (err) {
if (err instanceof Error) {
log.error("Failed to save session memory", {

View File

@@ -80,6 +80,105 @@ const readCommitFromBuildInfo = () => {
}
};
/**
 * Resolve the commit hash of the upstream tracking branch (e.g., origin/main).
 *
 * Reads `.git` metadata directly (HEAD, config, loose refs, packed-refs)
 * rather than shelling out to git. Returns null if not in a git repo, on a
 * detached HEAD, or when no upstream is configured.
 */
export const resolveUpstreamCommitHash = (options: { cwd?: string } = {}): string | null => {
  try {
    const gitDir = resolveGitDir(options.cwd ?? process.cwd());
    if (!gitDir) {
      return null;
    }

    // Read the current branch name from HEAD.
    const headPath = path.join(gitDir, "HEAD");
    const head = fs.readFileSync(headPath, "utf-8").trim();
    if (!head.startsWith("ref:")) {
      return null; // detached HEAD
    }
    const ref = head.replace(/^ref:\s*/i, "").trim();
    const branchName = ref.replace(/^refs\/heads\//, "");

    // Read the upstream tracking branch from config.
    const configPath = path.join(gitDir, "config");
    const config = fs.readFileSync(configPath, "utf-8");

    // Parse the config to find the [branch "branchName"] section.
    // Escape the branch name before embedding it in a pattern: branch names
    // may legally contain regex metacharacters (e.g. "+", ".", "("), which
    // would otherwise corrupt the RegExp or throw at construction time.
    const escapedBranch = branchName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
    const branchSection = new RegExp(`\\[branch\\s+"${escapedBranch}"\\]([^\\[]+)`, "i");
    const match = config.match(branchSection);
    if (!match) {
      return null;
    }
    const section = match[1];
    const remoteMatch = section.match(/remote\s*=\s*(\S+)/i);
    const mergeMatch = section.match(/merge\s*=\s*(\S+)/i);
    if (!remoteMatch || !mergeMatch) {
      return null;
    }
    const remote = remoteMatch[1];
    const mergeBranch = mergeMatch[1].replace(/^refs\/heads\//, "");

    // Resolve the upstream ref: try the loose ref file first, then fall
    // back to scanning packed-refs.
    const upstreamRef = `refs/remotes/${remote}/${mergeBranch}`;
    const packedRefsPath = path.join(gitDir, "packed-refs");
    const looseRefPath = path.join(gitDir, upstreamRef);

    try {
      const hash = fs.readFileSync(looseRefPath, "utf-8").trim();
      return formatCommit(hash);
    } catch {
      // Try packed-refs
      try {
        const packed = fs.readFileSync(packedRefsPath, "utf-8");
        const lines = packed.split("\n");
        for (const line of lines) {
          // Skip comment lines and blanks; data lines are "<hash> <refname>".
          if (line.startsWith("#") || !line.trim()) {
            continue;
          }
          const [hash, refName] = line.split(/\s+/);
          if (refName === upstreamRef) {
            return formatCommit(hash);
          }
        }
      } catch {
        // No packed-refs
      }
    }
    return null;
  } catch {
    return null;
  }
};
/**
 * Walk upward from `startDir` looking for the repository's `.git` directory.
 *
 * Handles both a plain `.git` directory and a `.git` file (worktrees /
 * submodules) containing a "gitdir: <path>" pointer, which is resolved
 * relative to the directory holding the file. At most 12 parent levels are
 * searched. Returns null when nothing is found.
 */
const resolveGitDir = (startDir: string): string | null => {
  let dir = startDir;
  // Bound the upward walk so a pathological layout cannot loop forever.
  for (let depth = 0; depth < 12; depth += 1) {
    const candidate = path.join(dir, ".git");
    try {
      const info = fs.statSync(candidate);
      if (info.isDirectory()) {
        return candidate;
      }
      if (info.isFile()) {
        // Worktree/submodule case: .git is a pointer file.
        const pointer = fs.readFileSync(candidate, "utf-8").match(/gitdir:\s*(.+)/i);
        if (pointer?.[1]) {
          return path.resolve(dir, pointer[1].trim());
        }
      }
    } catch {
      // No .git entry at this level; keep walking up.
    }
    const parent = path.dirname(dir);
    if (parent === dir) {
      break; // reached filesystem root
    }
    dir = parent;
  }
  return null;
};
export const resolveCommitHash = (options: { cwd?: string; env?: NodeJS.ProcessEnv } = {}) => {
if (cachedCommit !== undefined) {
return cachedCommit;

View File

@@ -8,6 +8,9 @@ import { loadOpenClawPlugins } from "./loader.js";
const log = createSubsystemLogger("plugins");
// Track which plugins have already registered CLI commands (idempotency guard)
const registeredPluginClis = new Set<string>();
export function registerPluginCliCommands(program: Command, cfg?: OpenClawConfig) {
const config = cfg ?? loadConfig();
const workspaceDir = resolveAgentWorkspaceDir(config, resolveDefaultAgentId(config));
@@ -26,6 +29,10 @@ export function registerPluginCliCommands(program: Command, cfg?: OpenClawConfig
const existingCommands = new Set(program.commands.map((cmd) => cmd.name()));
for (const entry of registry.cliRegistrars) {
// Skip if this plugin's CLI was already registered (idempotency)
if (registeredPluginClis.has(entry.pluginId)) {
continue;
}
if (entry.commands.length > 0) {
const overlaps = entry.commands.filter((command) => existingCommands.has(command));
if (overlaps.length > 0) {
@@ -52,6 +59,8 @@ export function registerPluginCliCommands(program: Command, cfg?: OpenClawConfig
for (const command of entry.commands) {
existingCommands.add(command);
}
// Mark as registered after successful registration
registeredPluginClis.add(entry.pluginId);
} catch (err) {
log.warn(`plugin CLI register failed (${entry.pluginId}): ${String(err)}`);
}

View File

@@ -19,6 +19,9 @@ import type {
PluginHookBeforeResetEvent,
PluginHookBeforeToolCallEvent,
PluginHookBeforeToolCallResult,
PluginHookBootstrapContext,
PluginHookBootstrapEvent,
PluginHookBootstrapResult,
PluginHookGatewayContext,
PluginHookGatewayStartEvent,
PluginHookGatewayStopEvent,
@@ -45,6 +48,9 @@ export type {
PluginHookBeforeAgentStartResult,
PluginHookLlmInputEvent,
PluginHookLlmOutputEvent,
PluginHookBootstrapContext,
PluginHookBootstrapEvent,
PluginHookBootstrapResult,
PluginHookAgentEndEvent,
PluginHookBeforeCompactionEvent,
PluginHookBeforeResetEvent,
@@ -234,6 +240,25 @@ export function createHookRunner(registry: PluginRegistry, options: HookRunnerOp
return runVoidHook("llm_output", event, ctx);
}
  /**
   * Run agent_bootstrap hook.
   * Allows plugins to inject or replace bootstrap files (e.g. virtual MEMORY.md).
   * Runs sequentially, merging file lists: each handler's returned `files`
   * (when present) supersedes the accumulated list, so the last handler to
   * return files wins.
   */
  async function runAgentBootstrap(
    event: PluginHookBootstrapEvent,
    ctx: PluginHookBootstrapContext,
  ): Promise<PluginHookBootstrapResult | undefined> {
    return runModifyingHook<"agent_bootstrap", PluginHookBootstrapResult>(
      "agent_bootstrap",
      event,
      ctx,
      // Merge rule: prefer the most recent handler's file list; keep the
      // accumulator when a handler returns no files.
      (acc, next) => ({
        files: next.files ?? acc?.files,
      }),
    );
  }
/**
* Run before_compaction hook.
*/
@@ -483,6 +508,7 @@ export function createHookRunner(registry: PluginRegistry, options: HookRunnerOp
runLlmInput,
runLlmOutput,
runAgentEnd,
runAgentBootstrap,
runBeforeCompaction,
runAfterCompaction,
runBeforeReset,

View File

@@ -300,6 +300,7 @@ export type PluginHookName =
| "llm_input"
| "llm_output"
| "agent_end"
| "agent_bootstrap"
| "before_compaction"
| "after_compaction"
| "before_reset"
@@ -323,6 +324,21 @@ export type PluginHookAgentContext = {
messageProvider?: string;
};
// agent_bootstrap hook

/** Ambient context passed to agent_bootstrap handlers (all fields optional). */
export type PluginHookBootstrapContext = {
  agentId?: string;
  sessionKey?: string;
  workspaceDir?: string;
};

/**
 * Event payload for agent_bootstrap: the bootstrap files gathered so far.
 * Each entry names one file; `missing` flags files that were expected but not
 * found, and `content` carries preloaded file text when available.
 */
export type PluginHookBootstrapEvent = {
  files: Array<{ name: string; path: string; content?: string; missing: boolean }>;
};

/**
 * Handler result: when `files` is set it supersedes the accumulated file list
 * (see the hook runner's merge rule); leave unset to keep the current list.
 */
export type PluginHookBootstrapResult = {
  files?: Array<{ name: string; path: string; content?: string; missing: boolean }>;
};
// before_agent_start hook
export type PluginHookBeforeAgentStartEvent = {
prompt: string;
@@ -535,6 +551,10 @@ export type PluginHookHandlerMap = {
ctx: PluginHookAgentContext,
) => Promise<void> | void;
agent_end: (event: PluginHookAgentEndEvent, ctx: PluginHookAgentContext) => Promise<void> | void;
agent_bootstrap: (
event: PluginHookBootstrapEvent,
ctx: PluginHookBootstrapContext,
) => Promise<PluginHookBootstrapResult | void> | PluginHookBootstrapResult | void;
before_compaction: (
event: PluginHookBeforeCompactionEvent,
ctx: PluginHookAgentContext,

View File

@@ -36,6 +36,15 @@ export function applyModelOverrideToSessionEntry(params: {
}
}
// Clear cached contextTokens when model changes so it gets re-looked up
// from the model catalog with the new model's context window.
// Always clear if contextTokens exists, even if no other fields changed
// (e.g., when resetting to default while already on default).
if (entry.contextTokens !== undefined) {
delete entry.contextTokens;
updated = true;
}
if (profileOverride) {
if (entry.authProfileOverride !== profileOverride) {
entry.authProfileOverride = profileOverride;

View File

@@ -94,4 +94,47 @@ describe("splitShellArgs", () => {
expect(splitShellArgs(`echo "oops`)).toBeNull();
expect(splitShellArgs(`echo 'oops`)).toBeNull();
});
it("returns null for trailing escape", () => {
expect(splitShellArgs("foo bar\\")).toBeNull();
});
it("handles multiple and leading/trailing spaces", () => {
expect(splitShellArgs("foo bar baz")).toEqual(["foo", "bar", "baz"]);
expect(splitShellArgs(" foo bar ")).toEqual(["foo", "bar"]);
});
it("handles escaped spaces outside quotes", () => {
expect(splitShellArgs("foo bar\\ baz qux")).toEqual(["foo", "bar baz", "qux"]);
});
it("handles adjacent quoted and unquoted parts", () => {
expect(splitShellArgs('pre"quoted"post')).toEqual(["prequotedpost"]);
});
it("handles empty and whitespace-only input", () => {
expect(splitShellArgs("")).toEqual([]);
expect(splitShellArgs(" ")).toEqual([]);
});
it("handles quotes inside quotes (different type)", () => {
expect(splitShellArgs(`foo "it's working" bar`)).toEqual(["foo", "it's working", "bar"]);
expect(splitShellArgs(`foo 'he said "hello"' bar`)).toEqual(["foo", 'he said "hello"', "bar"]);
});
it("handles paths with spaces in quotes", () => {
expect(splitShellArgs('cmd "/path/with spaces/file.txt"')).toEqual([
"cmd",
"/path/with spaces/file.txt",
]);
});
it("handles unicode characters", () => {
expect(splitShellArgs("echo 'héllo wörld' 日本語")).toEqual(["echo", "héllo wörld", "日本語"]);
});
it("handles tabs and newlines as whitespace", () => {
expect(splitShellArgs("foo\tbar\tbaz")).toEqual(["foo", "bar", "baz"]);
expect(splitShellArgs("foo\nbar")).toEqual(["foo", "bar"]);
});
});

View File

@@ -1,19 +1,28 @@
import { readFileSync } from "node:fs";
import { defineConfig } from "tsdown";
const pkg = JSON.parse(readFileSync("./package.json", "utf-8"));
const env = {
NODE_ENV: "production",
};
const define = {
__OPENCLAW_VERSION__: JSON.stringify(pkg.version),
};
export default defineConfig([
{
entry: "src/index.ts",
env,
define,
fixedExtension: false,
platform: "node",
},
{
entry: "src/entry.ts",
env,
define,
fixedExtension: false,
platform: "node",
},
@@ -34,6 +43,7 @@ export default defineConfig([
entry: "src/plugin-sdk/index.ts",
outDir: "dist/plugin-sdk",
env,
define,
fixedExtension: false,
platform: "node",
},
@@ -47,6 +57,7 @@ export default defineConfig([
{
entry: "src/extensionAPI.ts",
env,
define,
fixedExtension: false,
platform: "node",
},