Mirror of https://github.com/openclaw/openclaw.git, synced 2026-02-19 18:39:20 -05:00.
fix: include provider and model name in billing error message (#20510)
Merged via /review-pr -> /prepare-pr -> /merge-pr.
Prepared head SHA: 40dbdf62e8
Co-authored-by: echoVic <16428813+echoVic@users.noreply.github.com>
Co-authored-by: gumadeiras <5599352+gumadeiras@users.noreply.github.com>
Reviewed-by: @gumadeiras
@@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
 
 - Gateway/Daemon: forward `TMPDIR` into installed service environments so macOS LaunchAgent gateway runs can open SQLite temp/journal files reliably instead of failing with `SQLITE_CANTOPEN`. (#20512) Thanks @Clawborn.
+- Agents/Billing: include the active model that produced a billing error in user-facing billing messages (for example, `OpenAI (gpt-5.3)`) across payload, failover, and lifecycle error paths, so users can identify exactly which key needs credits. (#20510) Thanks @echoVic.
 - iOS/Screen: move `WKWebView` lifecycle ownership into `ScreenWebView` coordinator and explicit attach/detach flow to reduce gesture/lifecycle crash risk (`__NSArrayM insertObject:atIndex:` paths) during screen tab updates. (#20366) Thanks @ngutman.
 - Gateway/TUI: honor `agents.defaults.blockStreamingDefault` for `chat.send` by removing the hardcoded block-streaming disable override, so replies can use configured block-mode delivery. (#19693) Thanks @neipor.
 - Protocol/Apple: regenerate Swift gateway models for `push.test` so `pnpm protocol:check` stays green on main. Thanks @mbelinky.
@@ -92,13 +92,19 @@ describe("formatAssistantErrorText", () => {
     const result = formatAssistantErrorText(msg);
     expect(result).toBe(BILLING_ERROR_USER_MESSAGE);
   });
-  it("includes provider name in billing message when provider is given", () => {
+  it("includes provider and assistant model in billing message when provider is given", () => {
     const msg = makeAssistantError("insufficient credits");
     const result = formatAssistantErrorText(msg, { provider: "Anthropic" });
-    expect(result).toBe(formatBillingErrorMessage("Anthropic"));
+    expect(result).toBe(formatBillingErrorMessage("Anthropic", "test-model"));
     expect(result).toContain("Anthropic");
     expect(result).not.toContain("API provider");
   });
+  it("uses the active assistant model for billing message context", () => {
+    const msg = makeAssistantError("insufficient credits");
+    msg.model = "claude-3-5-sonnet";
+    const result = formatAssistantErrorText(msg, { provider: "Anthropic" });
+    expect(result).toBe(formatBillingErrorMessage("Anthropic", "claude-3-5-sonnet"));
+  });
   it("returns generic billing message when provider is not given", () => {
     const msg = makeAssistantError("insufficient credits");
     const result = formatAssistantErrorText(msg);
@@ -4,10 +4,13 @@ import { formatSandboxToolPolicyBlockedMessage } from "../sandbox.js";
 import { stableStringify } from "../stable-stringify.js";
 import type { FailoverReason } from "./types.js";
 
-export function formatBillingErrorMessage(provider?: string): string {
+export function formatBillingErrorMessage(provider?: string, model?: string): string {
   const providerName = provider?.trim();
-  if (providerName) {
-    return `⚠️ ${providerName} returned a billing error — your API key has run out of credits or has an insufficient balance. Check your ${providerName} billing dashboard and top up or switch to a different API key.`;
+  const modelName = model?.trim();
+  const providerLabel =
+    providerName && modelName ? `${providerName} (${modelName})` : providerName || undefined;
+  if (providerLabel) {
+    return `⚠️ ${providerLabel} returned a billing error — your API key has run out of credits or has an insufficient balance. Check your ${providerName} billing dashboard and top up or switch to a different API key.`;
   }
   return "⚠️ API provider returned a billing error — your API key has run out of credits or has an insufficient balance. Check your provider's billing dashboard and top up or switch to a different API key.";
 }
@@ -420,7 +423,7 @@ export function formatRawAssistantErrorForUi(raw?: string): string {
 
 export function formatAssistantErrorText(
   msg: AssistantMessage,
-  opts?: { cfg?: OpenClawConfig; sessionKey?: string; provider?: string },
+  opts?: { cfg?: OpenClawConfig; sessionKey?: string; provider?: string; model?: string },
 ): string | undefined {
   // Also format errors if errorMessage is present, even if stopReason isn't "error"
   const raw = (msg.errorMessage ?? "").trim();
@@ -487,7 +490,7 @@ export function formatAssistantErrorText(
   }
 
   if (isBillingErrorMessage(raw)) {
-    return formatBillingErrorMessage(opts?.provider);
+    return formatBillingErrorMessage(opts?.provider, opts?.model ?? msg.model);
   }
 
   if (isLikelyHttpErrorText(raw) || isRawApiErrorPayload(raw)) {
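To make the new `model` parameter concrete, here is a small self-contained sketch of the `providerLabel` logic from the hunk above, along with the message shapes it produces. It re-implements the behavior rather than importing the diffed module (whose path is not shown here), so treat it as illustrative only.

```ts
// Illustrative re-implementation of the providerLabel logic added above;
// the real code lives in the diffed errors module, not in this snippet.
function billingLabel(provider?: string, model?: string): string | undefined {
  const providerName = provider?.trim();
  const modelName = model?.trim();
  return providerName && modelName
    ? `${providerName} (${modelName})`
    : providerName || undefined;
}

// Previously the billing message named only the provider, e.g.
//   "⚠️ OpenAI returned a billing error — ..."
// With a model attached it now reads, e.g.
//   "⚠️ OpenAI (gpt-5.3) returned a billing error — ..."
console.log(billingLabel("OpenAI", "gpt-5.3"));  // "OpenAI (gpt-5.3)"
console.log(billingLabel("OpenAI"));             // "OpenAI"
console.log(billingLabel(undefined, "gpt-5.3")); // undefined → generic "API provider" message
```

Note that `formatAssistantErrorText` prefers an explicit `opts.model` and falls back to the erroring message's own `msg.model`, which is exactly what the `test-model` and `claude-3-5-sonnet` assertions in the test hunk exercise.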
@@ -518,6 +518,59 @@ describe("runEmbeddedPiAgent auth profile rotation", () => {
     }
   });
 
+  it("uses the active erroring model in billing failover errors", async () => {
+    const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-agent-"));
+    const workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-workspace-"));
+    try {
+      await writeAuthStore(agentDir);
+      runEmbeddedAttemptMock.mockResolvedValueOnce(
+        makeAttempt({
+          assistantTexts: [],
+          lastAssistant: buildAssistant({
+            stopReason: "error",
+            errorMessage: "insufficient credits",
+            provider: "openai",
+            model: "mock-rotated",
+          }),
+        }),
+      );
+
+      let thrown: unknown;
+      try {
+        await runEmbeddedPiAgent({
+          sessionId: "session:test",
+          sessionKey: "agent:test:billing-failover-active-model",
+          sessionFile: path.join(workspaceDir, "session.jsonl"),
+          workspaceDir,
+          agentDir,
+          config: makeConfig({ fallbacks: ["openai/mock-2"] }),
+          prompt: "hello",
+          provider: "openai",
+          model: "mock-1",
+          authProfileId: "openai:p1",
+          authProfileIdSource: "user",
+          timeoutMs: 5_000,
+          runId: "run:billing-failover-active-model",
+        });
+      } catch (err) {
+        thrown = err;
+      }
+
+      expect(thrown).toMatchObject({
+        name: "FailoverError",
+        reason: "billing",
+        provider: "openai",
+        model: "mock-rotated",
+      });
+      expect(thrown).toBeInstanceOf(Error);
+      expect((thrown as Error).message).toContain("openai (mock-rotated) returned a billing error");
+      expect(runEmbeddedAttemptMock).toHaveBeenCalledTimes(1);
+    } finally {
+      await fs.rm(agentDir, { recursive: true, force: true });
+      await fs.rm(workspaceDir, { recursive: true, force: true });
+    }
+  });
+
   it("skips profiles in cooldown when rotating after failure", async () => {
     vi.useFakeTimers();
     try {
@@ -160,6 +160,17 @@ const toNormalizedUsage = (usage: UsageAccumulator) => {
   };
 };
 
+function resolveActiveErrorContext(params: {
+  lastAssistant: { provider?: string; model?: string } | undefined;
+  provider: string;
+  model: string;
+}): { provider: string; model: string } {
+  return {
+    provider: params.lastAssistant?.provider ?? params.provider,
+    model: params.lastAssistant?.model ?? params.model,
+  };
+}
+
 export async function runEmbeddedPiAgent(
   params: RunEmbeddedPiAgentParams,
 ): Promise<EmbeddedPiRunResult> {
@@ -549,11 +560,17 @@ export async function runEmbeddedPiAgent(
     const lastTurnTotal = lastAssistantUsage?.total ?? attemptUsage?.total;
     const attemptCompactionCount = Math.max(0, attempt.compactionCount ?? 0);
     autoCompactionCount += attemptCompactionCount;
+    const activeErrorContext = resolveActiveErrorContext({
+      lastAssistant,
+      provider,
+      model: modelId,
+    });
     const formattedAssistantErrorText = lastAssistant
       ? formatAssistantErrorText(lastAssistant, {
           cfg: params.config,
           sessionKey: params.sessionKey ?? params.sessionId,
-          provider,
+          provider: activeErrorContext.provider,
+          model: activeErrorContext.model,
         })
       : undefined;
     const assistantErrorText =
@@ -919,7 +936,8 @@ export async function runEmbeddedPiAgent(
       ? formatAssistantErrorText(lastAssistant, {
          cfg: params.config,
          sessionKey: params.sessionKey ?? params.sessionId,
-         provider,
+         provider: activeErrorContext.provider,
+         model: activeErrorContext.model,
         })
       : undefined) ||
     lastAssistant?.errorMessage?.trim() ||
@@ -928,7 +946,10 @@ export async function runEmbeddedPiAgent(
       : rateLimitFailure
         ? "LLM request rate limited."
        : billingFailure
-         ? formatBillingErrorMessage(provider)
+         ? formatBillingErrorMessage(
+             activeErrorContext.provider,
+             activeErrorContext.model,
+           )
          : authFailure
            ? "LLM request unauthorized."
            : "LLM request failed.");
@@ -937,8 +958,8 @@ export async function runEmbeddedPiAgent(
       (isTimeoutErrorMessage(message) ? 408 : undefined);
     throw new FailoverError(message, {
       reason: assistantFailoverReason ?? "unknown",
-      provider,
-      model: modelId,
+      provider: activeErrorContext.provider,
+      model: activeErrorContext.model,
       profileId: lastProfileId,
       status,
     });
@@ -973,7 +994,8 @@ export async function runEmbeddedPiAgent(
       lastToolError: attempt.lastToolError,
       config: params.config,
       sessionKey: params.sessionKey ?? params.sessionId,
-      provider,
+      provider: activeErrorContext.provider,
+      model: activeErrorContext.model,
       verboseLevel: params.verboseLevel,
       reasoningLevel: params.reasoningLevel,
      toolResultFormat: resolvedToolResultFormat,
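The failover-path change hinges on one precedence rule: `resolveActiveErrorContext` prefers the provider and model reported on the erroring assistant message and only falls back to the provider and model the run was started with. A minimal standalone sketch of that rule follows (copied from the hunk above for illustration, not imported from the repository):

```ts
// Standalone copy of the fallback rule added above, for illustration only.
type ActiveErrorContext = { provider: string; model: string };

function resolveActiveErrorContext(params: {
  lastAssistant: { provider?: string; model?: string } | undefined;
  provider: string;
  model: string;
}): ActiveErrorContext {
  return {
    provider: params.lastAssistant?.provider ?? params.provider,
    model: params.lastAssistant?.model ?? params.model,
  };
}

// Mirrors the failover test above: the run was started with openai/mock-1, but the
// attempt that actually hit the billing error ran on mock-rotated, so the
// FailoverError and its message report mock-rotated.
const ctx = resolveActiveErrorContext({
  lastAssistant: { provider: "openai", model: "mock-rotated" },
  provider: "openai",
  model: "mock-1",
});
console.log(ctx); // { provider: "openai", model: "mock-rotated" }
```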
@@ -96,17 +96,19 @@ describe("buildEmbeddedRunPayloads", () => {
     expect(payloads.some((payload) => payload.text?.includes("request_id"))).toBe(false);
   });
 
-  it("includes provider context for billing errors", () => {
+  it("includes provider and model context for billing errors", () => {
     const payloads = buildPayloads({
       lastAssistant: makeAssistant({
+        model: "claude-3-5-sonnet",
         errorMessage: "insufficient credits",
         content: [{ type: "text", text: "insufficient credits" }],
       }),
       provider: "Anthropic",
+      model: "claude-3-5-sonnet",
     });
 
     expect(payloads).toHaveLength(1);
-    expect(payloads[0]?.text).toBe(formatBillingErrorMessage("Anthropic"));
+    expect(payloads[0]?.text).toBe(formatBillingErrorMessage("Anthropic", "claude-3-5-sonnet"));
     expect(payloads[0]?.isError).toBe(true);
   });
 
@@ -72,6 +72,7 @@ export function buildEmbeddedRunPayloads(params: {
   config?: OpenClawConfig;
   sessionKey: string;
   provider?: string;
+  model?: string;
   verboseLevel?: VerboseLevel;
   reasoningLevel?: ReasoningLevel;
   toolResultFormat?: ToolResultFormat;
@@ -104,6 +105,7 @@ export function buildEmbeddedRunPayloads(params: {
        cfg: params.config,
        sessionKey: params.sessionKey,
        provider: params.provider,
+       model: params.model,
      })
    : undefined;
  const rawErrorMessage = lastAssistantErrored
@@ -35,6 +35,8 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
   const friendlyError = formatAssistantErrorText(lastAssistant, {
     cfg: ctx.params.config,
     sessionKey: ctx.params.sessionKey,
+    provider: lastAssistant.provider,
+    model: lastAssistant.model,
   });
   emitAgentEvent({
     runId: ctx.params.runId,
@@ -0,0 +1,35 @@
+import type { AssistantMessage } from "@mariozechner/pi-ai";
+import { describe, expect, it, vi } from "vitest";
+import { createStubSessionHarness } from "./pi-embedded-subscribe.e2e-harness.js";
+import { subscribeEmbeddedPiSession } from "./pi-embedded-subscribe.js";
+
+describe("subscribeEmbeddedPiSession lifecycle billing errors", () => {
+  it("includes provider and model context in lifecycle billing errors", () => {
+    const { session, emit } = createStubSessionHarness();
+    const onAgentEvent = vi.fn();
+
+    subscribeEmbeddedPiSession({
+      session,
+      runId: "run-billing-error",
+      onAgentEvent,
+      sessionKey: "test-session",
+    });
+
+    const assistantMessage = {
+      role: "assistant",
+      stopReason: "error",
+      errorMessage: "insufficient credits",
+      provider: "Anthropic",
+      model: "claude-3-5-sonnet",
+    } as AssistantMessage;
+
+    emit({ type: "message_update", message: assistantMessage });
+    emit({ type: "agent_end" });
+
+    const lifecycleError = onAgentEvent.mock.calls.find(
+      (call) => call[0]?.stream === "lifecycle" && call[0]?.data?.phase === "error",
+    );
+    expect(lifecycleError).toBeDefined();
+    expect(lifecycleError?.[0]?.data?.error).toContain("Anthropic (claude-3-5-sonnet)");
+  });
+});
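For consumers of agent events, the lifecycle error string now carries the `Provider (model)` prefix asserted above. Below is a hedged sketch of reading it on the receiving side; the event shape is inferred from the new test's assertions, and `AgentEvent` is a stand-in type rather than the repository's real export.

```ts
// Stand-in type inferred from the test's assertions (stream / data.phase / data.error);
// the repository's actual agent event type may differ.
type AgentEvent = { stream: string; data?: { phase?: string; error?: string } };

function logLifecycleBillingError(event: AgentEvent): void {
  if (event.stream === "lifecycle" && event.data?.phase === "error" && event.data.error) {
    // e.g. "⚠️ Anthropic (claude-3-5-sonnet) returned a billing error — ..."
    console.error(`[agent] ${event.data.error}`);
  }
}
```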