fix(providers): include provider name in billing error messages (#14697)

Merged via /review-pr -> /prepare-pr -> /merge-pr.

Prepared head SHA: 774e0b6605
Co-authored-by: fagemx <117356295+fagemx@users.noreply.github.com>
Co-authored-by: shakkernerd <165377636+shakkernerd@users.noreply.github.com>
Reviewed-by: @shakkernerd
This commit is contained in:
fagemx
2026-02-13 02:23:27 +08:00
committed by GitHub
parent 5e7842a41d
commit bdd0c12329
6 changed files with 142 additions and 32 deletions

View File

@@ -1,13 +1,36 @@
import type { AssistantMessage } from "@mariozechner/pi-ai";
import { describe, expect, it } from "vitest";
import { BILLING_ERROR_USER_MESSAGE, formatAssistantErrorText } from "./pi-embedded-helpers.js";
import {
BILLING_ERROR_USER_MESSAGE,
formatBillingErrorMessage,
formatAssistantErrorText,
} from "./pi-embedded-helpers.js";
describe("formatAssistantErrorText", () => {
const makeAssistantError = (errorMessage: string): AssistantMessage =>
({
stopReason: "error",
errorMessage,
}) as AssistantMessage;
// Builds a minimal, fully-populated assistant message that ended in an error,
// for feeding formatAssistantErrorText in the cases below. All usage/cost
// counters are zeroed; the error text is mirrored into the content block.
const makeAssistantError = (errorMessage: string): AssistantMessage => {
  const zeroCost = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 };
  return {
    role: "assistant",
    api: "openai-responses",
    provider: "openai",
    model: "test-model",
    usage: {
      input: 0,
      output: 0,
      cacheRead: 0,
      cacheWrite: 0,
      totalTokens: 0,
      cost: zeroCost,
    },
    stopReason: "error",
    errorMessage,
    content: [{ type: "text", text: errorMessage }],
    timestamp: 0,
  };
};
it("returns a friendly message for context overflow", () => {
const msg = makeAssistantError("request_too_large");
@@ -68,4 +91,17 @@ describe("formatAssistantErrorText", () => {
const result = formatAssistantErrorText(msg);
expect(result).toBe(BILLING_ERROR_USER_MESSAGE);
});
it("includes provider name in billing message when provider is given", () => {
const msg = makeAssistantError("insufficient credits");
const result = formatAssistantErrorText(msg, { provider: "Anthropic" });
expect(result).toBe(formatBillingErrorMessage("Anthropic"));
expect(result).toContain("Anthropic");
expect(result).not.toContain("API provider");
});
it("returns generic billing message when provider is not given", () => {
const msg = makeAssistantError("insufficient credits");
const result = formatAssistantErrorText(msg);
expect(result).toContain("API provider");
expect(result).toBe(BILLING_ERROR_USER_MESSAGE);
});
});

View File

@@ -7,6 +7,7 @@ export {
} from "./pi-embedded-helpers/bootstrap.js";
export {
BILLING_ERROR_USER_MESSAGE,
formatBillingErrorMessage,
classifyFailoverReason,
formatRawAssistantErrorForUi,
formatAssistantErrorText,

View File

@@ -3,8 +3,15 @@ import type { OpenClawConfig } from "../../config/config.js";
import type { FailoverReason } from "./types.js";
import { formatSandboxToolPolicyBlockedMessage } from "../sandbox.js";
export const BILLING_ERROR_USER_MESSAGE =
"⚠️ API provider returned a billing error — your API key has run out of credits or has an insufficient balance. Check your provider's billing dashboard and top up or switch to a different API key.";
/**
 * Builds the user-facing billing/credits error message.
 *
 * @param provider - Optional provider display name (e.g. "Anthropic").
 *   Surrounding whitespace is ignored; a missing or blank value falls back
 *   to the generic wording.
 * @returns A provider-specific message when a non-blank provider is given,
 *   otherwise the generic "API provider" variant.
 */
export function formatBillingErrorMessage(provider?: string): string {
  const name = provider?.trim();
  return name
    ? `⚠️ ${name} returned a billing error — your API key has run out of credits or has an insufficient balance. Check your ${name} billing dashboard and top up or switch to a different API key.`
    : "⚠️ API provider returned a billing error — your API key has run out of credits or has an insufficient balance. Check your provider's billing dashboard and top up or switch to a different API key.";
}
export const BILLING_ERROR_USER_MESSAGE = formatBillingErrorMessage();
export function isContextOverflowError(errorMessage?: string): boolean {
if (!errorMessage) {
@@ -388,7 +395,7 @@ export function formatRawAssistantErrorForUi(raw?: string): string {
export function formatAssistantErrorText(
msg: AssistantMessage,
opts?: { cfg?: OpenClawConfig; sessionKey?: string },
opts?: { cfg?: OpenClawConfig; sessionKey?: string; provider?: string },
): string | undefined {
// Also format errors if errorMessage is present, even if stopReason isn't "error"
const raw = (msg.errorMessage ?? "").trim();
@@ -450,7 +457,7 @@ export function formatAssistantErrorText(
}
if (isBillingErrorMessage(raw)) {
return BILLING_ERROR_USER_MESSAGE;
return formatBillingErrorMessage(opts?.provider);
}
if (isLikelyHttpErrorText(raw) || isRawApiErrorPayload(raw)) {

View File

@@ -28,7 +28,7 @@ import {
import { normalizeProviderId } from "../model-selection.js";
import { ensureOpenClawModelsJson } from "../models-config.js";
import {
BILLING_ERROR_USER_MESSAGE,
formatBillingErrorMessage,
classifyFailoverReason,
formatAssistantErrorText,
isAuthAssistantError,
@@ -484,6 +484,7 @@ export async function runEmbeddedPiAgent(
? formatAssistantErrorText(lastAssistant, {
cfg: params.config,
sessionKey: params.sessionKey ?? params.sessionId,
provider,
})
: undefined;
const assistantErrorText =
@@ -792,6 +793,7 @@ export async function runEmbeddedPiAgent(
? formatAssistantErrorText(lastAssistant, {
cfg: params.config,
sessionKey: params.sessionKey ?? params.sessionId,
provider,
})
: undefined) ||
lastAssistant?.errorMessage?.trim() ||
@@ -800,7 +802,7 @@ export async function runEmbeddedPiAgent(
: rateLimitFailure
? "LLM request rate limited."
: billingFailure
? BILLING_ERROR_USER_MESSAGE
? formatBillingErrorMessage(provider)
: authFailure
? "LLM request unauthorized."
: "LLM request failed.");
@@ -833,6 +835,7 @@ export async function runEmbeddedPiAgent(
lastToolError: attempt.lastToolError,
config: params.config,
sessionKey: params.sessionKey ?? params.sessionId,
provider,
verboseLevel: params.verboseLevel,
reasoningLevel: params.reasoningLevel,
toolResultFormat: resolvedToolResultFormat,

View File

@@ -1,5 +1,6 @@
import type { AssistantMessage } from "@mariozechner/pi-ai";
import { describe, expect, it } from "vitest";
import { formatBillingErrorMessage } from "../../pi-embedded-helpers.js";
import { buildEmbeddedRunPayloads } from "./payloads.js";
describe("buildEmbeddedRunPayloads", () => {
@@ -14,13 +15,31 @@ describe("buildEmbeddedRunPayloads", () => {
},
"request_id": "req_011CX7DwS7tSvggaNHmefwWg"
}`;
const makeAssistant = (overrides: Partial<AssistantMessage>): AssistantMessage =>
({
stopReason: "error",
errorMessage: errorJson,
content: [{ type: "text", text: errorJson }],
...overrides,
}) as AssistantMessage;
// Default assistant fixture for these tests: an errored turn whose error text
// is the raw API error JSON. Callers override individual fields via the
// Partial<AssistantMessage> argument (applied last, so overrides win).
const makeAssistant = (overrides: Partial<AssistantMessage>): AssistantMessage => {
  const zeroCost = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 };
  const base: AssistantMessage = {
    role: "assistant",
    api: "openai-responses",
    provider: "openai",
    model: "test-model",
    usage: {
      input: 0,
      output: 0,
      cacheRead: 0,
      cacheWrite: 0,
      totalTokens: 0,
      cost: zeroCost,
    },
    timestamp: 0,
    stopReason: "error",
    errorMessage: errorJson,
    content: [{ type: "text", text: errorJson }],
  };
  return { ...base, ...overrides };
};
it("suppresses raw API error JSON when the assistant errored", () => {
const lastAssistant = makeAssistant({});
@@ -80,6 +99,27 @@ describe("buildEmbeddedRunPayloads", () => {
expect(payloads.some((payload) => payload.text?.includes("request_id"))).toBe(false);
});
it("includes provider context for billing errors", () => {
const lastAssistant = makeAssistant({
errorMessage: "insufficient credits",
content: [{ type: "text", text: "insufficient credits" }],
});
const payloads = buildEmbeddedRunPayloads({
assistantTexts: [],
toolMetas: [],
lastAssistant,
sessionKey: "session:telegram",
provider: "Anthropic",
inlineToolResultsAllowed: false,
verboseLevel: "off",
reasoningLevel: "off",
});
expect(payloads).toHaveLength(1);
expect(payloads[0]?.text).toBe(formatBillingErrorMessage("Anthropic"));
expect(payloads[0]?.isError).toBe(true);
});
it("suppresses raw error JSON even when errorMessage is missing", () => {
const lastAssistant = makeAssistant({ errorMessage: undefined });
const payloads = buildEmbeddedRunPayloads({
@@ -98,10 +138,15 @@ describe("buildEmbeddedRunPayloads", () => {
});
it("does not suppress error-shaped JSON when the assistant did not error", () => {
const lastAssistant = makeAssistant({
stopReason: "stop",
errorMessage: undefined,
content: [],
});
const payloads = buildEmbeddedRunPayloads({
assistantTexts: [errorJsonPretty],
toolMetas: [],
lastAssistant: { stopReason: "end_turn" } as AssistantMessage,
lastAssistant,
sessionKey: "session:telegram",
inlineToolResultsAllowed: false,
verboseLevel: "off",
@@ -132,10 +177,15 @@ describe("buildEmbeddedRunPayloads", () => {
});
it("does not add tool error fallback when assistant output exists", () => {
const lastAssistant = makeAssistant({
stopReason: "stop",
errorMessage: undefined,
content: [],
});
const payloads = buildEmbeddedRunPayloads({
assistantTexts: ["All good"],
toolMetas: [],
lastAssistant: { stopReason: "end_turn" } as AssistantMessage,
lastAssistant,
lastToolError: { toolName: "browser", error: "tab not found" },
sessionKey: "session:telegram",
inlineToolResultsAllowed: false,
@@ -149,20 +199,22 @@ describe("buildEmbeddedRunPayloads", () => {
});
it("adds tool error fallback when the assistant only invoked tools", () => {
const lastAssistant = makeAssistant({
stopReason: "toolUse",
errorMessage: undefined,
content: [
{
type: "toolCall",
id: "toolu_01",
name: "exec",
arguments: { command: "echo hi" },
},
],
});
const payloads = buildEmbeddedRunPayloads({
assistantTexts: [],
toolMetas: [],
lastAssistant: {
stopReason: "toolUse",
content: [
{
type: "toolCall",
id: "toolu_01",
name: "exec",
arguments: { command: "echo hi" },
},
],
} as AssistantMessage,
lastAssistant,
lastToolError: { toolName: "exec", error: "Command exited with code 1" },
sessionKey: "session:telegram",
inlineToolResultsAllowed: false,

View File

@@ -6,6 +6,7 @@ import { parseReplyDirectives } from "../../../auto-reply/reply/reply-directives
import { isSilentReplyText, SILENT_REPLY_TOKEN } from "../../../auto-reply/tokens.js";
import { formatToolAggregate } from "../../../auto-reply/tool-meta.js";
import {
BILLING_ERROR_USER_MESSAGE,
formatAssistantErrorText,
formatRawAssistantErrorForUi,
getApiErrorPayloadFingerprint,
@@ -27,6 +28,7 @@ export function buildEmbeddedRunPayloads(params: {
lastToolError?: { toolName: string; meta?: string; error?: string };
config?: OpenClawConfig;
sessionKey: string;
provider?: string;
verboseLevel?: VerboseLevel;
reasoningLevel?: ReasoningLevel;
toolResultFormat?: ToolResultFormat;
@@ -57,6 +59,7 @@ export function buildEmbeddedRunPayloads(params: {
? formatAssistantErrorText(params.lastAssistant, {
cfg: params.config,
sessionKey: params.sessionKey,
provider: params.provider,
})
: undefined;
const rawErrorMessage = lastAssistantErrored
@@ -75,6 +78,7 @@ export function buildEmbeddedRunPayloads(params: {
? normalizeTextForComparison(rawErrorMessage)
: null;
const normalizedErrorText = errorText ? normalizeTextForComparison(errorText) : null;
const normalizedGenericBillingErrorText = normalizeTextForComparison(BILLING_ERROR_USER_MESSAGE);
const genericErrorText = "The AI service returned an error. Please try again.";
if (errorText) {
replyItems.push({ text: errorText, isError: true });
@@ -133,6 +137,13 @@ export function buildEmbeddedRunPayloads(params: {
if (trimmed === genericErrorText) {
return true;
}
if (
normalized &&
normalizedGenericBillingErrorText &&
normalized === normalizedGenericBillingErrorText
) {
return true;
}
}
if (rawErrorMessage && trimmed === rawErrorMessage) {
return true;