diff --git a/CHANGELOG.md b/CHANGELOG.md
index b125b42a0f..7203dded45 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai
 - Cron/Gateway: separate per-job webhook delivery (`delivery.mode = "webhook"`) from announce delivery, enforce valid HTTP(S) webhook URLs, and keep a temporary legacy `notify + cron.webhook` fallback for stored jobs. (#17901) Thanks @advaitpaliwal.
 - Discord: add per-button `allowedUsers` allowlist for interactive components to restrict who can click buttons. Thanks @thewilloftheshadow.
 - Docker: add optional `OPENCLAW_INSTALL_BROWSER` build arg to preinstall Chromium + Xvfb in the Docker image, avoiding runtime Playwright installs. (#18449)
+- Agents/Z.AI: enable `tool_stream` by default for real-time tool call streaming, with opt-out via `params.tool_stream: false`. (#18173) Thanks @tianxiao1430-jpg.
 
 ### Fixes
 
diff --git a/docs/gateway/configuration-reference.md b/docs/gateway/configuration-reference.md
index 92e4f9d436..0c07aeccf6 100644
--- a/docs/gateway/configuration-reference.md
+++ b/docs/gateway/configuration-reference.md
@@ -666,6 +666,7 @@
 Time format in system prompt. Default: `auto` (OS preference).
 
 Your configured aliases always win over defaults. Z.AI GLM-4.x models automatically enable thinking mode unless you set `--thinking off` or define `agents.defaults.models["zai/<model>"].params.thinking` yourself.
+Z.AI models enable `tool_stream` by default for tool call streaming. Set `agents.defaults.models["zai/<model>"].params.tool_stream` to `false` to disable it.
 
 ### `agents.defaults.cliBackends`
 
diff --git a/docs/providers/zai.md b/docs/providers/zai.md
index 07b8171936..93313acba3 100644
--- a/docs/providers/zai.md
+++ b/docs/providers/zai.md
@@ -32,5 +32,7 @@ openclaw onboard --zai-api-key "$ZAI_API_KEY"
 ## Notes
 
 - GLM models are available as `zai/<model>` (example: `zai/glm-5`).
+- `tool_stream` is enabled by default for Z.AI tool-call streaming. Set
+  `agents.defaults.models["zai/<model>"].params.tool_stream` to `false` to disable it.
 - See [/providers/glm](/providers/glm) for the model family overview.
 - Z.AI uses Bearer auth with your API key.
diff --git a/src/agents/pi-embedded-runner/extra-params.zai-tool-stream.test.ts b/src/agents/pi-embedded-runner/extra-params.zai-tool-stream.test.ts
index 569816339c..3a757cea07 100644
--- a/src/agents/pi-embedded-runner/extra-params.zai-tool-stream.test.ts
+++ b/src/agents/pi-embedded-runner/extra-params.zai-tool-stream.test.ts
@@ -1,4 +1,5 @@
 import type { StreamFn } from "@mariozechner/pi-agent-core";
+import type { Context, Model, SimpleStreamOptions } from "@mariozechner/pi-ai";
 import { describe, expect, it, vi } from "vitest";
 import { applyExtraParamsToAgent } from "./extra-params.js";
 
@@ -10,104 +11,83 @@ vi.mock("@mariozechner/pi-ai", () => ({
   })),
 }));
 
+type ToolStreamCase = {
+  applyProvider: string;
+  applyModelId: string;
+  model: Model<"openai-completions">;
+  cfg?: Parameters<typeof applyExtraParamsToAgent>[1];
+  options?: SimpleStreamOptions;
+};
+
+function runToolStreamCase(params: ToolStreamCase) {
+  const payload: Record<string, unknown> = { model: params.model.id, messages: [] };
+  const baseStreamFn: StreamFn = (_model, _context, options) => {
+    options?.onPayload?.(payload);
+    return {} as ReturnType<StreamFn>;
+  };
+  const agent = { streamFn: baseStreamFn };
+
+  applyExtraParamsToAgent(agent, params.cfg, params.applyProvider, params.applyModelId);
+
+  const context: Context = { messages: [] };
+  void agent.streamFn?.(params.model, context, params.options ?? {});
+
+  return payload;
+}
+
 describe("extra-params: Z.AI tool_stream support", () => {
-  it("should inject tool_stream=true for zai provider by default", () => {
-    const mockStreamFn: StreamFn = vi.fn((model, context, options) => {
-      // Capture the payload that would be sent
-      options?.onPayload?.({ model: model.id, messages: [] });
-      return {
-        push: vi.fn(),
-        result: vi.fn().mockResolvedValue({
-          role: "assistant",
-          content: [{ type: "text", text: "ok" }],
-          stopReason: "stop",
-        }),
-      } as unknown as ReturnType<StreamFn>;
+  it("injects tool_stream=true for zai provider by default", () => {
+    const payload = runToolStreamCase({
+      applyProvider: "zai",
+      applyModelId: "glm-5",
+      model: {
+        api: "openai-completions",
+        provider: "zai",
+        id: "glm-5",
+      } as Model<"openai-completions">,
     });
 
-    const agent = { streamFn: mockStreamFn };
-    const cfg = {
-      agents: {
-        defaults: {},
-      },
-    };
-
-    applyExtraParamsToAgent(
-      agent,
-      cfg as unknown as Parameters<typeof applyExtraParamsToAgent>[1],
-      "zai",
-      "glm-5",
-    );
-
-    // The streamFn should be wrapped
-    expect(agent.streamFn).toBeDefined();
-    expect(agent.streamFn).not.toBe(mockStreamFn);
+    expect(payload.tool_stream).toBe(true);
   });
 
-  it("should not inject tool_stream for non-zai providers", () => {
-    const mockStreamFn: StreamFn = vi.fn(
-      () =>
-        ({
-          push: vi.fn(),
-          result: vi.fn().mockResolvedValue({
-            role: "assistant",
-            content: [{ type: "text", text: "ok" }],
-            stopReason: "stop",
-          }),
-        }) as unknown as ReturnType<StreamFn>,
-    );
+  it("does not inject tool_stream for non-zai providers", () => {
+    const payload = runToolStreamCase({
+      applyProvider: "openai",
+      applyModelId: "gpt-5",
+      model: {
+        api: "openai-completions",
+        provider: "openai",
+        id: "gpt-5",
+      } as Model<"openai-completions">,
+    });
 
-    const agent = { streamFn: mockStreamFn };
-    const cfg = {};
-
-    applyExtraParamsToAgent(
-      agent,
-      cfg as unknown as Parameters<typeof applyExtraParamsToAgent>[1],
-      "anthropic",
-      "claude-opus-4-6",
-    );
-
-    // Should remain unchanged (except for OpenAI wrapper)
-    expect(agent.streamFn).toBeDefined();
+    expect(payload).not.toHaveProperty("tool_stream");
   });
 
-  it("should allow disabling tool_stream via params", () => {
-    const mockStreamFn: StreamFn = vi.fn(
-      () =>
-        ({
-          push: vi.fn(),
-          result: vi.fn().mockResolvedValue({
-            role: "assistant",
-            content: [{ type: "text", text: "ok" }],
-            stopReason: "stop",
-          }),
-        }) as unknown as ReturnType<StreamFn>,
-    );
-
-    const agent = { streamFn: mockStreamFn };
-    const cfg = {
-      agents: {
-        defaults: {
-          models: {
-            "zai/glm-5": {
-              params: {
-                tool_stream: false,
+  it("allows disabling tool_stream via params", () => {
+    const payload = runToolStreamCase({
+      applyProvider: "zai",
+      applyModelId: "glm-5",
+      model: {
+        api: "openai-completions",
+        provider: "zai",
+        id: "glm-5",
+      } as Model<"openai-completions">,
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "zai/glm-5": {
+                params: {
+                  tool_stream: false,
+                },
              },
            },
          },
        },
      },
-    };
+    });
 
-    applyExtraParamsToAgent(
-      agent,
-      cfg as unknown as Parameters<typeof applyExtraParamsToAgent>[1],
-      "zai",
-      "glm-5",
-    );
-
-    // The tool_stream wrapper should be applied but with enabled=false
-    // In this case, it should just return the underlying streamFn
-    expect(agent.streamFn).toBeDefined();
+    expect(payload).not.toHaveProperty("tool_stream");
   });
 });
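For readers coming from the docs changes above, the opt-out shape exercised by the new "allows disabling tool_stream via params" test is sketched below. The nested object mirrors the test's `cfg` fixture; how a real deployment supplies this object to `applyExtraParamsToAgent` (config file format, loader) is not shown in this patch, so everything outside the object literal is an assumption.

```ts
// Sketch only: per-model opt-out shape, copied from the test fixture above.
// The "zai/glm-5" key and the agents.defaults.models path come from the test;
// the surrounding plumbing is assumed, not taken from this patch.
const cfg = {
  agents: {
    defaults: {
      models: {
        "zai/glm-5": {
          params: {
            tool_stream: false, // opt out of the default Z.AI tool-call streaming
          },
        },
      },
    },
  },
};
```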