diff --git a/src/agents/pi-embedded-helpers.sanitizeuserfacingtext.test.ts b/src/agents/pi-embedded-helpers.sanitizeuserfacingtext.test.ts
index 37603c2627..5b42146114 100644
--- a/src/agents/pi-embedded-helpers.sanitizeuserfacingtext.test.ts
+++ b/src/agents/pi-embedded-helpers.sanitizeuserfacingtext.test.ts
@@ -23,6 +23,29 @@ describe("sanitizeUserFacingText", () => {
     );
   });
 
+  it("sanitizes direct context-overflow errors", () => {
+    expect(
+      sanitizeUserFacingText(
+        "Context overflow: prompt too large for the model. Try again with less input or a larger-context model.",
+      ),
+    ).toContain("Context overflow: prompt too large for the model.");
+    expect(sanitizeUserFacingText("Request size exceeds model context window")).toContain(
+      "Context overflow: prompt too large for the model.",
+    );
+  });
+
+  it("does not rewrite conversational mentions of context overflow", () => {
+    const text =
+      "nah it failed, hit a context overflow. the prompt was too large for the model. want me to retry it with a different approach?";
+    expect(sanitizeUserFacingText(text)).toBe(text);
+  });
+
+  it("does not rewrite technical summaries that mention context overflow", () => {
+    const text =
+      "Problem: When a subagent reads a very large file, it can exceed the model context window. Auto-compaction cannot help in that case.";
+    expect(sanitizeUserFacingText(text)).toBe(text);
+  });
+
   it("sanitizes raw API error payloads", () => {
     const raw = '{"type":"error","error":{"message":"Something exploded","type":"server_error"}}';
     expect(sanitizeUserFacingText(raw)).toBe("LLM error server_error: Something exploded");
diff --git a/src/agents/pi-embedded-helpers/errors.ts b/src/agents/pi-embedded-helpers/errors.ts
index 92a47fd75a..9111020753 100644
--- a/src/agents/pi-embedded-helpers/errors.ts
+++ b/src/agents/pi-embedded-helpers/errors.ts
@@ -67,6 +67,8 @@ const ERROR_PAYLOAD_PREFIX_RE =
 const FINAL_TAG_RE = /<\s*\/?\s*final\s*>/gi;
 const ERROR_PREFIX_RE =
   /^(?:error|api\s*error|openai\s*error|anthropic\s*error|gateway\s*error|request failed|failed|exception)[:\s-]+/i;
+const CONTEXT_OVERFLOW_ERROR_HEAD_RE =
+  /^(?:context overflow:|request_too_large\b|request size exceeds\b|request exceeds the maximum size\b|context length exceeded\b|maximum context length\b|prompt is too long\b|exceeds model context window\b)/i;
 const HTTP_STATUS_PREFIX_RE = /^(?:http\s*)?(\d{3})\s+(.+)$/i;
 const HTTP_ERROR_HINTS = [
   "error",
@@ -135,6 +137,18 @@ function isLikelyHttpErrorText(raw: string): boolean {
   return HTTP_ERROR_HINTS.some((hint) => message.includes(hint));
 }
 
+function shouldRewriteContextOverflowText(raw: string): boolean {
+  if (!isContextOverflowError(raw)) {
+    return false;
+  }
+  return (
+    isRawApiErrorPayload(raw) ||
+    isLikelyHttpErrorText(raw) ||
+    ERROR_PREFIX_RE.test(raw) ||
+    CONTEXT_OVERFLOW_ERROR_HEAD_RE.test(raw)
+  );
+}
+
 type ErrorPayload = Record<string, unknown>;
 
 function isErrorPayloadObject(payload: unknown): payload is ErrorPayload {
@@ -403,7 +417,7 @@ export function sanitizeUserFacingText(text: string): string {
     );
   }
 
-  if (isContextOverflowError(trimmed)) {
+  if (shouldRewriteContextOverflowText(trimmed)) {
     return (
       "Context overflow: prompt too large for the model. " +
       "Try again with less input or a larger-context model."