fix: prevent false positive context overflow detection in conversation text (#2078)

This commit is contained in:
Stephen Brian King
2026-02-09 00:22:57 -07:00
committed by GitHub
parent 71b4be8799
commit c984e6d8df
4 changed files with 20 additions and 7 deletions

View File

@@ -15,6 +15,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Errors: prevent false positive context overflow detection when the conversation mentions the "context overflow" topic. (#2078) Thanks @sbking.
- Model failover: treat HTTP 400 errors as failover-eligible, enabling automatic model fallback when providers return bad request errors. (#1879) Thanks @orenyomtov.
- Exec approvals: format forwarded command text as inline/fenced monospace for safer approval scanning across channels. (#11937)
- Config: clamp `maxTokens` to `contextWindow` to prevent invalid model configs. (#5516) Thanks @lailoo.

View File

@@ -46,4 +46,12 @@ describe("isContextOverflowError", () => {
expect(isContextOverflowError("model not found")).toBe(false);
expect(isContextOverflowError("authentication failed")).toBe(false);
});
it("ignores normal conversation text mentioning context overflow", () => {
// These are legitimate conversation snippets, not error messages
expect(isContextOverflowError("Let's investigate the context overflow bug")).toBe(false);
expect(isContextOverflowError("The mystery context overflow errors are strange")).toBe(false);
expect(isContextOverflowError("We're debugging context overflow issues")).toBe(false);
expect(isContextOverflowError("Something is causing context overflow messages")).toBe(false);
});
});

View File

@@ -24,7 +24,7 @@ export function isContextOverflowError(errorMessage?: string): boolean {
lower.includes("prompt is too long") ||
lower.includes("exceeds model context window") ||
(hasRequestSizeExceeds && hasContextWindow) ||
lower.includes("context overflow") ||
lower.includes("context overflow:") ||
(lower.includes("413") && lower.includes("too large"))
);
}

View File

@@ -278,15 +278,21 @@ function resolvePerplexityModel(perplexity?: PerplexityConfig): string {
}
/**
 * Extracts the Grok-specific configuration from a web-search config object.
 *
 * @param search - Optional web-search configuration; may be absent or malformed.
 * @returns The nested `grok` config when present and object-shaped, otherwise
 *   an empty config so callers can safely read optional fields.
 */
function resolveGrokConfig(search?: WebSearchConfig): GrokConfig {
  // Stripped-diff artifact removed: the original had both the one-line and
  // braced versions of each guard, leaving unreachable duplicate returns.
  if (!search || typeof search !== "object") {
    return {};
  }
  const grok = "grok" in search ? search.grok : undefined;
  if (!grok || typeof grok !== "object") {
    return {};
  }
  // NOTE(review): cast assumes the runtime shape matches GrokConfig — the
  // config is validated elsewhere, presumably; confirm against the loader.
  return grok as GrokConfig;
}
/**
 * Resolves the Grok API key, preferring the explicit config value over the
 * `XAI_API_KEY` environment variable.
 *
 * @param grok - Optional Grok configuration that may carry an `apiKey`.
 * @returns The normalized API key, or `undefined` when neither source yields one.
 */
function resolveGrokApiKey(grok?: GrokConfig): string | undefined {
  // Stripped-diff artifact removed: the original contained both the one-line
  // and braced versions of this guard, i.e. a duplicate dead return.
  const fromConfig = normalizeApiKey(grok?.apiKey);
  if (fromConfig) {
    return fromConfig;
  }
  const fromEnv = normalizeApiKey(process.env.XAI_API_KEY);
  // Coerce falsy normalization results (e.g. empty string) to undefined.
  return fromEnv || undefined;
}
@@ -474,9 +480,7 @@ async function runWebSearch(params: {
? `${params.provider}:${params.query}:${params.count}:${params.country || "default"}:${params.search_lang || "default"}:${params.ui_lang || "default"}:${params.freshness || "default"}`
: params.provider === "perplexity"
? `${params.provider}:${params.query}:${params.perplexityBaseUrl ?? DEFAULT_PERPLEXITY_BASE_URL}:${params.perplexityModel ?? DEFAULT_PERPLEXITY_MODEL}`
: params.provider === "grok"
? `${params.provider}:${params.query}:${params.grokModel ?? DEFAULT_GROK_MODEL}:${params.grokInlineCitations ?? false}`
: `${params.provider}:${params.query}:${params.count}:${params.country || "default"}:${params.search_lang || "default"}:${params.ui_lang || "default"}`,
: `${params.provider}:${params.query}:${params.grokModel ?? DEFAULT_GROK_MODEL}:${String(params.grokInlineCitations ?? false)}`,
);
const cached = readCache(SEARCH_CACHE, cacheKey);
if (cached) {