test: cover cron telemetry and typed fetch mocks

Sebastian
2026-02-17 09:35:31 -05:00
parent f44e3b2a34
commit bd1e7fadd5
3 changed files with 61 additions and 3 deletions

View File

@@ -15,6 +15,7 @@ Docs: https://docs.openclaw.ai
- Discord: expose native `/exec` command options (host/security/ask/node) so Discord slash commands get autocomplete and structured inputs. Thanks @thewilloftheshadow.
- Discord: allow reusable interactive components with `components.reusable=true` so buttons, selects, and forms can be used multiple times before expiring. Thanks @thewilloftheshadow.
- Cron/Gateway: separate per-job webhook delivery (`delivery.mode = "webhook"`) from announce delivery, enforce valid HTTP(S) webhook URLs, and keep a temporary legacy `notify + cron.webhook` fallback for stored jobs. (#17901) Thanks @advaitpaliwal.
- Cron: log per-run model/provider usage telemetry in cron run logs/webhooks and add a local usage report script for aggregating token usage by job. (#18172) Thanks @HankAndTheCrew.
- Discord: add per-button `allowedUsers` allowlist for interactive components to restrict who can click buttons. Thanks @thewilloftheshadow.
- Docker: add optional `OPENCLAW_INSTALL_BROWSER` build arg to preinstall Chromium + Xvfb in the Docker image, avoiding runtime Playwright installs. (#18449)
- Agents/Z.AI: enable `tool_stream` by default for real-time tool call streaming, with opt-out via `params.tool_stream: false`. (#18173) Thanks @tianxiao1430-jpg.
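
The cron telemetry entry above also mentions a local usage report script that aggregates token usage by job. That script is not part of this diff, but as a rough sketch of the aggregation it describes: assuming run logs sit under runs/<jobId>.jsonl with one JSON entry per line (the layout used by the cron test below), per-job totals could be computed like this. The function name and logDir parameter are illustrative, not the shipped script's API.

// Hypothetical aggregation sketch -- not the usage report script shipped with this change.
import fs from "node:fs/promises";
import path from "node:path";

async function totalTokensByJob(logDir: string): Promise<Map<string, number>> {
  const totals = new Map<string, number>();
  const runsDir = path.join(logDir, "runs");
  for (const file of await fs.readdir(runsDir)) {
    if (!file.endsWith(".jsonl")) continue;
    const raw = await fs.readFile(path.join(runsDir, file), "utf-8");
    for (const line of raw.split("\n")) {
      if (!line.trim()) continue;
      let parsed: unknown = undefined;
      try {
        parsed = JSON.parse(line);
      } catch {
        // tolerate malformed lines, like the second entry in the test below
      }
      if (typeof parsed !== "object" || parsed === null) continue;
      const entry = parsed as { jobId?: unknown; usage?: { total_tokens?: unknown } };
      const tokens = entry.usage?.total_tokens;
      if (typeof entry.jobId !== "string" || typeof tokens !== "number") continue;
      totals.set(entry.jobId, (totals.get(entry.jobId) ?? 0) + tokens);
    }
  }
  return totals;
}

Summing usage.total_tokens keeps the sketch short; the same loop could just as easily bucket input/output/cache counters separately.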

View File

@@ -97,4 +97,55 @@ describe("cron run log", () => {
await fs.rm(dir, { recursive: true, force: true });
});
it("reads telemetry fields", async () => {
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cron-log-telemetry-"));
const logPath = path.join(dir, "runs", "job-1.jsonl");
await appendCronRunLog(logPath, {
ts: 1,
jobId: "job-1",
action: "finished",
status: "ok",
model: "gpt-5.2",
provider: "openai",
usage: {
input_tokens: 10,
output_tokens: 5,
total_tokens: 15,
cache_read_tokens: 2,
cache_write_tokens: 1,
},
});
await fs.appendFile(
logPath,
`${JSON.stringify({
ts: 2,
jobId: "job-1",
action: "finished",
status: "ok",
model: " ",
provider: "",
usage: { input_tokens: "oops" },
})}\n`,
"utf-8",
);
const entries = await readCronRunLogEntries(logPath, { limit: 10, jobId: "job-1" });
expect(entries[0]?.model).toBe("gpt-5.2");
expect(entries[0]?.provider).toBe("openai");
expect(entries[0]?.usage).toEqual({
input_tokens: 10,
output_tokens: 5,
total_tokens: 15,
cache_read_tokens: 2,
cache_write_tokens: 1,
});
expect(entries[1]?.model).toBeUndefined();
expect(entries[1]?.provider).toBeUndefined();
expect(entries[1]?.usage?.input_tokens).toBeUndefined();
await fs.rm(dir, { recursive: true, force: true });
});
});
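
The malformed second entry appended above pins down the reader's sanitization rules: a blank model, an empty provider, and a non-numeric input_tokens all come back as undefined. A minimal sketch of that kind of normalization, assuming the real logic lives inside readCronRunLogEntries (the helper names and exact key list here are illustrative):

// Illustrative only -- the actual rules live in readCronRunLogEntries.
const USAGE_KEYS = [
  "input_tokens",
  "output_tokens",
  "total_tokens",
  "cache_read_tokens",
  "cache_write_tokens",
] as const;
type CronRunUsage = Partial<Record<(typeof USAGE_KEYS)[number], number>>;

// Blank or whitespace-only strings (model: " ", provider: "") become undefined.
function cleanLabel(value: unknown): string | undefined {
  return typeof value === "string" && value.trim() !== "" ? value : undefined;
}

// Only finite numeric counters survive; input_tokens: "oops" is dropped.
function cleanUsage(value: unknown): CronRunUsage | undefined {
  if (typeof value !== "object" || value === null) return undefined;
  const record = value as Record<string, unknown>;
  const usage: CronRunUsage = {};
  for (const key of USAGE_KEYS) {
    const raw = record[key];
    if (typeof raw === "number" && Number.isFinite(raw)) usage[key] = raw;
  }
  return Object.keys(usage).length > 0 ? usage : undefined;
}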

View File

@@ -53,7 +53,9 @@ describe("voyage embedding provider", () => {
expect.objectContaining({ provider: "voyage" }),
);
-const [url, init] = fetchMock.mock.calls[0] ?? [];
+const call = fetchMock.mock.calls[0];
+expect(call).toBeDefined();
+const [url, init] = call as [RequestInfo | URL, RequestInit | undefined];
expect(url).toBe("https://api.voyageai.com/v1/embeddings");
const headers = (init?.headers ?? {}) as Record<string, string>;
@@ -86,7 +88,9 @@ describe("voyage embedding provider", () => {
await result.provider.embedQuery("test");
-const [url, init] = fetchMock.mock.calls[0] ?? [];
+const call = fetchMock.mock.calls[0];
+expect(call).toBeDefined();
+const [url, init] = call as [RequestInfo | URL, RequestInit | undefined];
expect(url).toBe("https://proxy.example.com/embeddings");
const headers = (init?.headers ?? {}) as Record<string, string>;
@@ -123,7 +127,9 @@ describe("voyage embedding provider", () => {
await result.provider.embedBatch(["doc1", "doc2"]);
-const [, init] = fetchMock.mock.calls[0] ?? [];
+const call = fetchMock.mock.calls[0];
+expect(call).toBeDefined();
+const [, init] = call as [RequestInfo | URL, RequestInit | undefined];
const body = JSON.parse(init?.body as string);
expect(body).toEqual({
model: "voyage-4-large",