/*
 * Scraped web-view metadata (preserved, comment-wrapped so the file stays valid TypeScript):
 * File: openclaw/src/auto-reply/status.test.ts
 * Snapshot: 2026-02-18 01:34:35 +00:00 — 565 lines, 16 KiB, TypeScript
 *
 * NOTE(review): the original hosting page warned that this file contains
 * invisible Unicode characters and characters that may be confused with
 * other characters. Several expected strings below (e.g. " Slash commands",
 * " Commands (1/") begin with what renders as a plain space but may be an
 * invisible code point — verify against the canonical file before editing
 * those literals.
 */
import fs from "node:fs";
import path from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { normalizeTestText } from "../../test/helpers/normalize-text.js";
import { withTempHome } from "../../test/helpers/temp-home.js";
import type { OpenClawConfig } from "../config/config.js";
import { createSuccessfulImageMediaDecision } from "./media-understanding.test-fixtures.js";
import {
buildCommandsMessage,
buildCommandsMessagePaginated,
buildHelpMessage,
buildStatusMessage,
} from "./status.js";
// Hoisted stub for the plugin-command registry. vi.hoisted runs this factory
// before the (hoisted) vi.mock call below, so the mock factory can safely
// close over `listPluginCommands`. Default behavior: no plugin commands.
const { listPluginCommands } = vi.hoisted(() => ({
listPluginCommands: vi.fn(
(): Array<{ name: string; description: string; pluginId: string }> => [],
),
}));
// Replace the real module with the hoisted stub for every test in this file.
vi.mock("../plugins/commands.js", () => ({
listPluginCommands,
}));
// Restore all mocks after each test so per-test overrides
// (e.g. mockReturnValue in the pagination tests) do not leak between tests.
afterEach(() => {
vi.restoreAllMocks();
});
describe("buildStatusMessage", () => {
it("summarizes agent readiness and context usage", () => {
  // Pricing config so the cost line can be computed from token counts.
  const pricedConfig = {
    models: {
      providers: {
        anthropic: {
          apiKey: "test-key",
          models: [
            {
              id: "pi:opus",
              cost: { input: 1, output: 1, cacheRead: 0, cacheWrite: 0 },
            },
          ],
        },
      },
    },
  } as unknown as OpenClawConfig;
  const rendered = normalizeTestText(
    buildStatusMessage({
      config: pricedConfig,
      agent: { model: "anthropic/pi:opus", contextTokens: 32_000 },
      sessionEntry: {
        sessionId: "abc",
        updatedAt: 0,
        inputTokens: 1200,
        outputTokens: 800,
        totalTokens: 16_000,
        contextTokens: 32_000,
        thinkingLevel: "low",
        verboseLevel: "on",
        compactionCount: 2,
      },
      sessionKey: "agent:main:main",
      sessionScope: "per-sender",
      resolvedThink: "medium",
      resolvedVerbose: "off",
      queue: { mode: "collect", depth: 0 },
      modelAuth: "api-key",
      now: 10 * 60_000, // 10 minutes after updatedAt: 0
    }),
  );
  // Every line of the status summary that must be present.
  const expectedFragments = [
    "OpenClaw",
    "Model: anthropic/pi:opus",
    "api-key",
    "Tokens: 1.2k in / 800 out",
    "Cost: $0.0020",
    "Context: 16k/32k (50%)",
    "Compactions: 2",
    "Session: agent:main:main",
    "updated 10m ago",
    "Runtime: direct",
    "Think: medium",
    "elevated",
    "Queue: collect",
  ];
  for (const fragment of expectedFragments) {
    expect(rendered).toContain(fragment);
  }
  // resolvedVerbose is "off", so the verbose marker must be absent.
  expect(rendered).not.toContain("verbose");
});
it("uses per-agent sandbox config when config and session key are provided", () => {
  const multiAgentConfig = {
    agents: {
      list: [
        { id: "main", default: true },
        { id: "discord", sandbox: { mode: "all" } },
      ],
    },
  } as unknown as OpenClawConfig;
  const message = buildStatusMessage({
    config: multiAgentConfig,
    agent: {},
    sessionKey: "agent:discord:discord:channel:1456350065223270435",
    sessionScope: "per-sender",
    queue: { mode: "collect", depth: 0 },
  });
  // The session key names the "discord" agent, whose sandbox.mode ("all")
  // should win over the default agent's settings.
  expect(normalizeTestText(message)).toContain("Runtime: docker/all");
});
it("shows verbose/elevated labels only when enabled", () => {
  const message = buildStatusMessage({
    agent: { model: "anthropic/claude-opus-4-5" },
    sessionEntry: { sessionId: "v1", updatedAt: 0 },
    sessionKey: "agent:main:main",
    sessionScope: "per-sender",
    resolvedThink: "low",
    resolvedVerbose: "on",
    resolvedElevated: "on",
    queue: { mode: "collect", depth: 0 },
  });
  // Both flags resolved "on" → both labels appear in the raw text.
  for (const label of ["verbose", "elevated"]) {
    expect(message).toContain(label);
  }
});
it("includes media understanding decisions when present", () => {
  // Element type of the mediaDecisions parameter, derived from the signature.
  type MediaDecisionParam = NonNullable<
    Parameters<typeof buildStatusMessage>[0]["mediaDecisions"]
  >[number];
  const message = buildStatusMessage({
    agent: { model: "anthropic/claude-opus-4-5" },
    sessionEntry: { sessionId: "media", updatedAt: 0 },
    sessionKey: "agent:main:main",
    queue: { mode: "none" },
    mediaDecisions: [
      // Fixture stands in for a successful image-understanding pass.
      createSuccessfulImageMediaDecision() as unknown as MediaDecisionParam,
      {
        capability: "audio",
        outcome: "skipped",
        attachments: [
          {
            attachmentIndex: 1,
            attempts: [
              {
                type: "provider",
                outcome: "skipped",
                reason: "maxBytes: too large",
              },
            ],
          },
        ],
      },
    ],
  });
  // Skip reason is abbreviated to its prefix ("maxBytes") in the summary.
  expect(normalizeTestText(message)).toContain(
    "Media: image ok (openai/gpt-5.2) · audio skipped (maxBytes)",
  );
});
it("omits media line when all decisions are none", () => {
  const message = buildStatusMessage({
    agent: { model: "anthropic/claude-opus-4-5" },
    sessionEntry: { sessionId: "media-none", updatedAt: 0 },
    sessionKey: "agent:main:main",
    queue: { mode: "none" },
    // Every capability reports "no-attachment": nothing worth rendering.
    mediaDecisions: [
      { capability: "image", outcome: "no-attachment", attachments: [] },
      { capability: "audio", outcome: "no-attachment", attachments: [] },
      { capability: "video", outcome: "no-attachment", attachments: [] },
    ],
  });
  const rendered = normalizeTestText(message);
  expect(rendered).not.toContain("Media:");
});
it("does not show elevated label when session explicitly disables it", () => {
  const message = buildStatusMessage({
    // Agent default says elevated on; the session entry turns it off.
    agent: { model: "anthropic/claude-opus-4-5", elevatedDefault: "on" },
    sessionEntry: { sessionId: "v1", updatedAt: 0, elevatedLevel: "off" },
    sessionKey: "agent:main:main",
    sessionScope: "per-sender",
    resolvedThink: "low",
    resolvedVerbose: "off",
    queue: { mode: "collect", depth: 0 },
  });
  // Locate the options line (prefixed with the gear emoji) and verify the
  // session-level "off" beat the agent-level default.
  const gearLine = message
    .split("\n")
    .find((line) => line.trim().startsWith("⚙️"));
  expect(gearLine).toBeTruthy();
  expect(gearLine).not.toContain("elevated");
});
it("prefers model overrides over last-run model", () => {
  const rendered = normalizeTestText(
    buildStatusMessage({
      agent: { model: "anthropic/claude-opus-4-5", contextTokens: 32_000 },
      sessionEntry: {
        sessionId: "override-1",
        updatedAt: 0,
        // Explicit overrides…
        providerOverride: "openai",
        modelOverride: "gpt-4.1-mini",
        // …must beat the provider/model recorded from the last run.
        modelProvider: "anthropic",
        model: "claude-haiku-4-5",
        contextTokens: 32_000,
      },
      sessionKey: "agent:main:main",
      sessionScope: "per-sender",
      queue: { mode: "collect", depth: 0 },
      modelAuth: "api-key",
    }),
  );
  expect(rendered).toContain("Model: openai/gpt-4.1-mini");
});
it("keeps provider prefix from configured model", () => {
  const rendered = normalizeTestText(
    buildStatusMessage({
      agent: { model: "google-antigravity/claude-sonnet-4-5" },
      sessionScope: "per-sender",
      queue: { mode: "collect", depth: 0 },
      modelAuth: "api-key",
    }),
  );
  // The configured provider prefix must survive into the status line verbatim.
  expect(rendered).toContain("Model: google-antigravity/claude-sonnet-4-5");
});
it("handles missing agent config gracefully", () => {
  const rendered = normalizeTestText(
    buildStatusMessage({
      agent: {},
      sessionScope: "per-sender",
      queue: { mode: "collect", depth: 0 },
      modelAuth: "api-key",
    }),
  );
  // Even with an empty agent config the skeleton lines are still emitted.
  for (const fragment of ["Model:", "Context:", "Queue: collect"]) {
    expect(rendered).toContain(fragment);
  }
});
it("includes group activation for group sessions", () => {
  const message = buildStatusMessage({
    agent: {},
    sessionEntry: {
      sessionId: "g1",
      updatedAt: 0,
      groupActivation: "always",
      chatType: "group",
    },
    sessionKey: "agent:main:whatsapp:group:123@g.us",
    sessionScope: "per-sender",
    queue: { mode: "collect", depth: 0 },
    modelAuth: "api-key",
  });
  // Group sessions surface their activation mode in the status output.
  expect(message).toContain("Activation: always");
});
it("shows queue details when overridden", () => {
  const message = buildStatusMessage({
    agent: {},
    sessionEntry: { sessionId: "q1", updatedAt: 0 },
    sessionKey: "agent:main:main",
    sessionScope: "per-sender",
    // showDetails opts in to the expanded queue description.
    queue: {
      mode: "collect",
      depth: 3,
      debounceMs: 2000,
      cap: 5,
      dropPolicy: "old",
      showDetails: true,
    },
    modelAuth: "api-key",
  });
  // debounceMs (2000) is rendered as seconds ("2s").
  expect(message).toContain(
    "Queue: collect (depth 3 · debounce 2s · cap 5 · drop old)",
  );
});
it("inserts usage summary beneath context line", () => {
  const message = buildStatusMessage({
    agent: { model: "anthropic/claude-opus-4-5", contextTokens: 32_000 },
    sessionEntry: { sessionId: "u1", updatedAt: 0, totalTokens: 1000 },
    sessionKey: "agent:main:main",
    sessionScope: "per-sender",
    queue: { mode: "collect", depth: 0 },
    usageLine: "📊 Usage: Claude 80% left (5h)",
    modelAuth: "api-key",
  });
  const renderedLines = normalizeTestText(message).split("\n");
  const contextLineIndex = renderedLines.findIndex((line) =>
    line.includes("Context:"),
  );
  // The usage summary must sit on the line immediately after "Context:".
  expect(contextLineIndex).toBeGreaterThan(-1);
  expect(renderedLines[contextLineIndex + 1]).toContain(
    "Usage: Claude 80% left (5h)",
  );
});
it("hides cost when not using an API key", () => {
  // Pricing data exists for the model…
  const pricedConfig = {
    models: {
      providers: {
        anthropic: {
          models: [
            {
              id: "claude-opus-4-5",
              cost: { input: 1, output: 1, cacheRead: 0, cacheWrite: 0 },
            },
          ],
        },
      },
    },
  } as unknown as OpenClawConfig;
  const message = buildStatusMessage({
    config: pricedConfig,
    agent: { model: "anthropic/claude-opus-4-5" },
    sessionEntry: { sessionId: "c1", updatedAt: 0, inputTokens: 10 },
    sessionKey: "agent:main:main",
    sessionScope: "per-sender",
    queue: { mode: "collect", depth: 0 },
    modelAuth: "oauth",
  });
  // …but with oauth auth (no API key) the cost line must be suppressed.
  expect(message).not.toContain("💵 Cost:");
});
// Write a minimal single-record session transcript (JSONL) for tests,
// mirroring the on-disk layout: <dir>/.openclaw/agents/<agentId>/sessions/<sessionId>.jsonl
function writeTranscriptUsageLog(params: {
  dir: string;
  agentId: string;
  sessionId: string;
  usage: {
    input: number;
    output: number;
    cacheRead: number;
    cacheWrite: number;
    totalTokens: number;
  };
}) {
  const sessionsDir = path.join(
    params.dir,
    ".openclaw",
    "agents",
    params.agentId,
    "sessions",
  );
  fs.mkdirSync(sessionsDir, { recursive: true });
  // One assistant message carrying the usage payload — the only record
  // the transcript-usage reader needs for these tests.
  const record = JSON.stringify({
    type: "message",
    message: {
      role: "assistant",
      model: "claude-opus-4-5",
      usage: params.usage,
    },
  });
  fs.writeFileSync(
    path.join(sessionsDir, `${params.sessionId}.jsonl`),
    record,
    "utf-8",
  );
}
// Canonical usage payload shared by the transcript-log tests below:
// cacheRead dominates, so contexts computed from it land near 1.0k tokens.
const baselineTranscriptUsage = {
  input: 1,
  output: 2,
  cacheRead: 1000,
  cacheWrite: 0,
  totalTokens: 1003,
} as const;
// Convenience wrapper: write the baseline usage record for a given agent/session.
function writeBaselineTranscriptUsageLog(params: {
  dir: string;
  agentId: string;
  sessionId: string;
}) {
  writeTranscriptUsageLog({ ...params, usage: baselineTranscriptUsage });
}
// Shared buildStatusMessage invocation for the transcript-usage tests.
// The session entry claims only 3 tokens, so any larger context figure in the
// output must have come from the transcript log on disk.
function buildTranscriptStatusText(params: { sessionId: string; sessionKey: string }) {
  return buildStatusMessage({
    agent: { model: "anthropic/claude-opus-4-5", contextTokens: 32_000 },
    sessionEntry: {
      sessionId: params.sessionId,
      updatedAt: 0,
      totalTokens: 3,
      contextTokens: 32_000,
    },
    sessionKey: params.sessionKey,
    sessionScope: "per-sender",
    queue: { mode: "collect", depth: 0 },
    includeTranscriptUsage: true,
    modelAuth: "api-key",
  });
}
it("prefers cached prompt tokens from the session log", async () => {
  await withTempHome(
    async (dir) => {
      const sessionId = "sess-1";
      writeBaselineTranscriptUsageLog({ dir, agentId: "main", sessionId });
      const statusText = buildTranscriptStatusText({
        sessionId,
        sessionKey: "agent:main:main",
      });
      // cacheRead (1000) from the log outweighs the session's 3 tokens.
      expect(normalizeTestText(statusText)).toContain("Context: 1.0k/32k");
    },
    { prefix: "openclaw-status-" },
  );
});
it("reads transcript usage for non-default agents", async () => {
  await withTempHome(
    async (dir) => {
      const sessionId = "sess-worker1";
      // Log lives under the "worker1" agent, not the default "main".
      writeBaselineTranscriptUsageLog({ dir, agentId: "worker1", sessionId });
      const statusText = buildTranscriptStatusText({
        sessionId,
        sessionKey: "agent:worker1:telegram:12345",
      });
      expect(normalizeTestText(statusText)).toContain("Context: 1.0k/32k");
    },
    { prefix: "openclaw-status-" },
  );
});
it("reads transcript usage using explicit agentId when sessionKey is missing", async () => {
  await withTempHome(
    async (dir) => {
      const sessionId = "sess-worker2";
      writeTranscriptUsageLog({
        dir,
        agentId: "worker2",
        sessionId,
        usage: {
          input: 2,
          output: 3,
          cacheRead: 1200,
          cacheWrite: 0,
          totalTokens: 1205,
        },
      });
      // sessionKey is deliberately omitted: the explicit agentId alone
      // must be enough to locate the transcript on disk.
      const statusText = buildStatusMessage({
        agent: { model: "anthropic/claude-opus-4-5", contextTokens: 32_000 },
        agentId: "worker2",
        sessionEntry: {
          sessionId,
          updatedAt: 0,
          totalTokens: 5,
          contextTokens: 32_000,
        },
        sessionScope: "per-sender",
        queue: { mode: "collect", depth: 0 },
        includeTranscriptUsage: true,
        modelAuth: "api-key",
      });
      expect(normalizeTestText(statusText)).toContain("Context: 1.2k/32k");
    },
    { prefix: "openclaw-status-" },
  );
});
});
describe("buildCommandsMessage", () => {
  it("lists commands with aliases and hints", () => {
    const message = buildCommandsMessage({
      commands: { config: false, debug: false },
    } as unknown as OpenClawConfig);
    // Entries that must always be listed, aliases included.
    const visibleEntries = [
      " Slash commands",
      "Status",
      "/commands - List all slash commands.",
      "/skill - Run a skill by name.",
      "/think (/thinking, /t) - Set thinking level.",
      "/compact - Compact the session context.",
    ];
    for (const entry of visibleEntries) {
      expect(message).toContain(entry);
    }
    // config/debug are disabled, so their commands must be hidden.
    expect(message).not.toContain("/config");
    expect(message).not.toContain("/debug");
  });
  it("includes skill commands when provided", () => {
    const message = buildCommandsMessage(
      { commands: { config: false, debug: false } } as unknown as OpenClawConfig,
      [{ name: "demo_skill", skillName: "demo-skill", description: "Demo skill" }],
    );
    expect(message).toContain("/demo_skill - Demo skill");
  });
});
describe("buildHelpMessage", () => {
  it("hides config/debug when disabled", () => {
    const message = buildHelpMessage({
      commands: { config: false, debug: false },
    } as unknown as OpenClawConfig);
    // Skills section stays visible…
    expect(message).toContain("Skills");
    expect(message).toContain("/skill <name> [input]");
    // …while the disabled command entries disappear.
    expect(message).not.toContain("/config");
    expect(message).not.toContain("/debug");
  });
});
describe("buildCommandsMessagePaginated", () => {
  it("formats telegram output with pages", () => {
    const { text } = buildCommandsMessagePaginated(
      { commands: { config: false, debug: false } } as unknown as OpenClawConfig,
      undefined,
      { surface: "telegram", page: 1 },
    );
    // Header carries a "current/total" page indicator.
    expect(text).toContain(" Commands (1/");
    expect(text).toContain("Session");
    expect(text).toContain("/stop - Stop the current run.");
  });
  it("includes plugin commands in the paginated list", () => {
    // Override the hoisted mock so the registry reports one plugin command
    // (afterEach's restoreAllMocks undoes this after the test).
    listPluginCommands.mockReturnValue([
      { name: "plugin_cmd", description: "Plugin command", pluginId: "demo-plugin" },
    ]);
    // page: 99 — presumably far past the last page; the plugin entry must
    // still be reachable in the returned text. TODO confirm clamping behavior.
    const { text } = buildCommandsMessagePaginated(
      { commands: { config: false, debug: false } } as unknown as OpenClawConfig,
      undefined,
      { surface: "telegram", page: 99 },
    );
    expect(text).toContain("Plugins");
    expect(text).toContain("/plugin_cmd (demo-plugin) - Plugin command");
  });
});