mirror of https://github.com/openclaw/openclaw.git
synced 2026-02-19 18:39:20 -05:00

fix: cap context window resolution (#6187) (thanks @iamEvanYT)
@@ -129,6 +129,7 @@ Docs: https://docs.openclaw.ai

- Mentions: honor mentionPatterns even when explicit mentions are present. (#3303) Thanks @HirokiKobayashi-R.
- Discord: restore username directory lookup in target resolution. (#3131) Thanks @bonald.
- Agents: align MiniMax base URL test expectation with default provider config. (#3131) Thanks @bonald.
- Agents: respect configured context window cap for compaction safeguard. (#6187) Thanks @iamEvanYT.
- Agents: prevent retries on oversized image errors and surface size limits. (#2871) Thanks @Suksham-sharma.
- Agents: inherit provider baseUrl/api for inline models. (#2740) Thanks @lploc94.
- Memory Search: keep auto provider model defaults and only include remote when configured. (#2576) Thanks @papago2355.
@@ -3,36 +3,30 @@ summary: "Session pruning: tool-result trimming to reduce context bloat"

read_when:
- You want to reduce LLM context growth from tool outputs
- You are tuning agents.defaults.contextPruning
title: "Session Pruning"
---

# Session Pruning

Session pruning trims **old tool results** from the in-memory context right before each LLM call. It does **not** rewrite the on-disk session history (`*.jsonl`).
## When it runs

- When `mode: "cache-ttl"` is enabled and the last Anthropic call for the session is older than `ttl`.
- Only affects the messages sent to the model for that request.
- Only active for Anthropic API calls (and OpenRouter Anthropic models).
- For best results, match `ttl` to your model `cacheRetention`.
- After a prune, the TTL window resets so subsequent requests keep cache until `ttl` expires again.
## Smart defaults (Anthropic)

- **OAuth or setup-token** profiles: enable `cache-ttl` pruning and set heartbeat to `1h`.
- **API key** profiles: enable `cache-ttl` pruning, set heartbeat to `30m`, and default `cacheRetention: "short"` on Anthropic models.
- If you set any of these values explicitly, OpenClaw does **not** override them.
## What this improves (cost + cache behavior)

- **Why prune:** Anthropic prompt caching only applies within the TTL. If a session goes idle past the TTL, the next request re-caches the full prompt unless you trim it first.
- **What gets cheaper:** pruning reduces the **cacheWrite** size for that first request after the TTL expires.
- **Why the TTL reset matters:** once pruning runs, the cache window resets, so follow‑up requests can reuse the freshly cached prompt instead of re-caching the full history again.
- **What it does not do:** pruning doesn’t add tokens or “double” costs; it only changes what gets cached on that first post‑TTL request.
## What can be pruned

- Only `toolResult` messages.
- User + assistant messages are **never** modified.
- The last `keepLastAssistants` assistant messages are protected; tool results after that cutoff are not pruned.

@@ -40,42 +34,35 @@ Session pruning trims **old tool results** from the in-memory context right befo

- Tool results containing **image blocks** are skipped (never trimmed/cleared).
## Context window estimation

Pruning uses an estimated context window (chars ≈ tokens × 4). The base window is resolved in this order:

1) `models.providers.*.models[].contextWindow` override.
2) Model definition `contextWindow` (from the model registry).
3) Default `200000` tokens.

If `agents.defaults.contextTokens` is set, it is treated as a cap (min) on the resolved window; the sketch below walks through the full resolution.
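A minimal sketch of that resolution order (standalone TypeScript; `resolveWindow` is a stand-in for the `resolveContextWindowInfo` helper changed in this commit):

```ts
type Source = "modelsConfig" | "model" | "agentContextTokens" | "default";

// Base window: config override → model registry → default,
// then agents.defaults.contextTokens acts as a cap (min), never a raise.
function resolveWindow(opts: {
  configOverride?: number; // models.providers.*.models[].contextWindow
  modelWindow?: number;    // contextWindow from the model registry
  capTokens?: number;      // agents.defaults.contextTokens
  defaultTokens?: number;
}): { tokens: number; source: Source } {
  const base = opts.configOverride
    ? { tokens: opts.configOverride, source: "modelsConfig" as const }
    : opts.modelWindow
      ? { tokens: opts.modelWindow, source: "model" as const }
      : { tokens: opts.defaultTokens ?? 200_000, source: "default" as const };
  return opts.capTokens && opts.capTokens < base.tokens
    ? { tokens: opts.capTokens, source: "agentContextTokens" }
    : base;
}

resolveWindow({ modelWindow: 200_000, capTokens: 20_000 }); // { tokens: 20_000, source: "agentContextTokens" }
resolveWindow({ modelWindow: 64_000, capTokens: 128_000 }); // { tokens: 64_000, source: "model" } (cap never raises)
```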
## Mode

### cache-ttl

- Pruning only runs if the last Anthropic call is older than `ttl` (default `5m`).
- When it runs: same soft-trim + hard-clear behavior as before.
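A minimal sketch of that gate, assuming the session tracks when it last called Anthropic (the field name `lastAnthropicCallAt` is hypothetical):

```ts
// Prune only once the Anthropic prompt cache has gone cold, so pruning
// never invalidates a still-warm cache.
function shouldPrune(
  lastAnthropicCallAt: number | undefined, // epoch ms; hypothetical session field
  ttlMs: number,
  now = Date.now(),
): boolean {
  if (lastAnthropicCallAt === undefined) return false; // first call: nothing cached yet
  return now - lastAnthropicCallAt > ttlMs;
}

shouldPrune(Date.now() - 10 * 60_000, 5 * 60_000); // true: idle 10m > 5m ttl
```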
## Soft vs hard pruning

- **Soft-trim**: only for oversized tool results.
  - Keeps head + tail, inserts `...`, and appends a note with the original size.
  - Skips results with image blocks.
- **Hard-clear**: replaces the entire tool result with `hardClear.placeholder`.
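A minimal sketch of the soft-trim step (illustrative; the exact split sizes and note format are assumptions, not the shipped implementation):

```ts
// Keep the head and tail of an oversized tool result, drop the middle,
// and append a note recording the original size.
function softTrim(text: string, maxChars: number): string {
  if (text.length <= maxChars) return text;
  const keep = Math.max(32, maxChars) - 16; // reserve room for the "..." separator
  const head = text.slice(0, Math.ceil(keep / 2));
  const tail = text.slice(text.length - Math.floor(keep / 2));
  return `${head}\n...\n${tail}\n[soft-trimmed: original ${text.length} chars]`;
}
```

Hard-clear is simpler: the entire result is replaced with the configured `hardClear.placeholder` string.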
## Tool selection
|
||||
|
||||
- `tools.allow` / `tools.deny` support `*` wildcards.
|
||||
- Deny wins.
|
||||
- Matching is case-insensitive.
|
||||
- Empty allow list => all tools allowed.
|
||||
|
||||
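A minimal sketch of those rules, compiling `*` wildcards to anchored case-insensitive regexes (an illustration of the semantics, not the shipped matcher):

```ts
// "*image*" → /^.*image.*$/i
function wildcardToRegex(pattern: string): RegExp {
  const escaped = pattern
    .replace(/[.*+?^${}()|[\]\\]/g, "\\$&") // escape regex metacharacters
    .replace(/\\\*/g, ".*");                // then re-open the * wildcard
  return new RegExp(`^${escaped}$`, "i");
}

function isToolPrunable(tool: string, allow: string[], deny: string[]): boolean {
  if (deny.some((p) => wildcardToRegex(p).test(tool))) return false; // deny wins
  if (allow.length === 0) return true; // empty allow list => all tools allowed
  return allow.some((p) => wildcardToRegex(p).test(tool));
}

isToolPrunable("Exec", ["exec", "read"], ["*image*"]);       // true (case-insensitive)
isToolPrunable("read_image", ["exec", "read"], ["*image*"]); // false (deny wins)
```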
## Interaction with other limits

- Built-in tools already truncate their own output; session pruning is an extra layer that prevents long-running chats from accumulating too much tool output in the model context.
- Compaction is separate: compaction summarizes and persists, pruning is transient per request. See [/concepts/compaction](/concepts/compaction).
## Defaults (when enabled)

- `ttl`: `"5m"`
- `keepLastAssistants`: `3`
- `softTrimRatio`: `0.3`

@@ -85,37 +72,33 @@ Pruning uses an estimated context window (chars ≈ tokens × 4). The window siz

- `hardClear`: `{ enabled: true, placeholder: "[Old tool result content cleared]" }`
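Written out as config, those defaults look roughly like this (a sketch; `mode` is shown as `cache-ttl` because the defaults only apply when pruning is enabled, and any defaults elided by the diff above are omitted):

```json5
{
  agent: {
    contextPruning: {
      mode: "cache-ttl",
      ttl: "5m",
      keepLastAssistants: 3,
      softTrimRatio: 0.3,
      hardClear: { enabled: true, placeholder: "[Old tool result content cleared]" },
    },
  },
}
```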
## Examples

Default (off):

```json5
{
  agent: {
    contextPruning: { mode: "off" },
  },
}
```
Enable TTL-aware pruning:

```json5
{
  agent: {
    contextPruning: { mode: "cache-ttl", ttl: "5m" },
  },
}
```
Restrict pruning to specific tools:

```json5
{
  agent: {
    contextPruning: {
      mode: "cache-ttl",
      tools: { allow: ["exec", "read"], deny: ["*image*"] },
    },
  },
}
```
@@ -77,7 +77,7 @@ describe("context-window-guard", () => {
      cfg,
      provider: "openrouter",
      modelId: "tiny",
      modelContextWindow: 64_000,
      defaultTokens: 200_000,
    });
    const guard = evaluateContextWindowGuard({ info });
@@ -85,7 +85,7 @@ describe("context-window-guard", () => {
    expect(guard.shouldBlock).toBe(true);
  });

  it("caps with agents.defaults.contextTokens", () => {
    const cfg = {
      agents: { defaults: { contextTokens: 20_000 } },
    } satisfies OpenClawConfig;
@@ -93,7 +93,7 @@ describe("context-window-guard", () => {
      cfg,
      provider: "anthropic",
      modelId: "whatever",
      modelContextWindow: 200_000,
      defaultTokens: 200_000,
    });
    const guard = evaluateContextWindowGuard({ info });
@@ -102,6 +102,21 @@ describe("context-window-guard", () => {
    expect(guard.shouldBlock).toBe(false);
  });

  it("does not override when cap exceeds base window", () => {
    const cfg = {
      agents: { defaults: { contextTokens: 128_000 } },
    } satisfies OpenClawConfig;
    const info = resolveContextWindowInfo({
      cfg,
      provider: "anthropic",
      modelId: "whatever",
      modelContextWindow: 64_000,
      defaultTokens: 200_000,
    });
    expect(info.source).toBe("model");
    expect(info.tokens).toBe(64_000);
  });

  it("uses default when nothing else is available", () => {
    const info = resolveContextWindowInfo({
      cfg: undefined,
@@ -11,9 +11,7 @@ export type ContextWindowInfo = {
};

function normalizePositiveInt(value: unknown): number | null {
  if (typeof value !== "number" || !Number.isFinite(value)) return null;
  const int = Math.floor(value);
  return int > 0 ? int : null;
}
@@ -25,11 +23,6 @@ export function resolveContextWindowInfo(params: {
  modelContextWindow?: number;
  defaultTokens: number;
}): ContextWindowInfo {
  const fromModelsConfig = (() => {
    const providers = params.cfg?.models?.providers as
      | Record<string, { models?: Array<{ id?: string; contextWindow?: number }> }>
@@ -39,16 +32,19 @@ export function resolveContextWindowInfo(params: {
    const match = models.find((m) => m?.id === params.modelId);
    return normalizePositiveInt(match?.contextWindow);
  })();

  const fromModel = normalizePositiveInt(params.modelContextWindow);
  const baseInfo = fromModelsConfig
    ? { tokens: fromModelsConfig, source: "modelsConfig" as const }
    : fromModel
      ? { tokens: fromModel, source: "model" as const }
      : { tokens: Math.floor(params.defaultTokens), source: "default" as const };

  const capTokens = normalizePositiveInt(params.cfg?.agents?.defaults?.contextTokens);
  if (capTokens && capTokens < baseInfo.tokens) {
    return { tokens: capTokens, source: "agentContextTokens" };
  }

  return baseInfo;
}

export type ContextWindowGuardResult = ContextWindowInfo & {
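For reference, the cap behavior this enables (mirroring the tests above; assumes the same exported signature):

```ts
// Cap applies: configured contextTokens is below the resolved base window.
resolveContextWindowInfo({
  cfg: { agents: { defaults: { contextTokens: 20_000 } } },
  provider: "anthropic",
  modelId: "whatever",
  modelContextWindow: 200_000,
  defaultTokens: 200_000,
});
// => { tokens: 20_000, source: "agentContextTokens" }

// Cap ignored: it can only shrink the window, never grow it.
resolveContextWindowInfo({
  cfg: { agents: { defaults: { contextTokens: 128_000 } } },
  provider: "anthropic",
  modelId: "whatever",
  modelContextWindow: 64_000,
  defaultTokens: 200_000,
});
// => { tokens: 64_000, source: "model" }
```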
@@ -197,10 +197,7 @@ export default function compactionSafeguardExtension(api: ExtensionAPI): void {
    try {
      const runtime = getCompactionSafeguardRuntime(ctx.sessionManager);
      const modelContextWindow = resolveContextWindowTokens(model);
      const contextWindowTokens = runtime?.contextWindowTokens ?? modelContextWindow;
      const turnPrefixMessages = preparation.turnPrefixMessages ?? [];
      let messagesToSummarize = preparation.messagesToSummarize;
||||
@@ -124,7 +124,7 @@ export function registerStatusHealthSessionsCommands(program: Command) {
|
||||
["openclaw sessions --json", "Machine-readable output."],
|
||||
["openclaw sessions --store ./tmp/sessions.json", "Use a specific session store."],
|
||||
])}\n\n${theme.muted(
|
||||
"Shows token usage per session when the agent reports it; set agents.defaults.contextTokens to see % of your model window.",
|
||||
"Shows token usage per session when the agent reports it; set agents.defaults.contextTokens to cap the window and show %.",
|
||||
)}`,
|
||||
)
|
||||
.addHelpText(
|
||||
|
||||
@@ -122,7 +122,7 @@ export type AgentDefaultsConfig = {
   * Include elapsed time in message envelopes ("on" | "off", default: "on").
   */
  envelopeElapsed?: "on" | "off";
  /** Optional context window cap (used for runtime estimates + status %). */
  contextTokens?: number;
  /** Optional CLI backends for text-only fallback (claude-cli, etc.). */
  cliBackends?: Record<string, CliBackendConfig>;
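Setting that cap in config (a sketch; `128000` is an arbitrary illustrative value):

```json5
{
  agents: {
    defaults: {
      // Cap the estimated context window used for runtime estimates and status %.
      contextTokens: 128000,
    },
  },
}
```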