docs(zai): document tool_stream defaults

This commit is contained in:
Sebastian
2026-02-17 09:22:26 -05:00
parent 7caf874546
commit 5d1bcc76cc
4 changed files with 69 additions and 85 deletions

View File

@@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai
- Cron/Gateway: separate per-job webhook delivery (`delivery.mode = "webhook"`) from announce delivery, enforce valid HTTP(S) webhook URLs, and keep a temporary legacy `notify + cron.webhook` fallback for stored jobs. (#17901) Thanks @advaitpaliwal.
- Discord: add per-button `allowedUsers` allowlist for interactive components to restrict who can click buttons. Thanks @thewilloftheshadow.
- Docker: add optional `OPENCLAW_INSTALL_BROWSER` build arg to preinstall Chromium + Xvfb in the Docker image, avoiding runtime Playwright installs. (#18449)
- Agents/Z.AI: enable `tool_stream` by default for real-time tool call streaming, with opt-out via `params.tool_stream: false`. (#18173) Thanks @tianxiao1430-jpg.
### Fixes

View File

@@ -666,6 +666,7 @@ Time format in system prompt. Default: `auto` (OS preference).
Your configured aliases always win over defaults.
Z.AI GLM-4.x models automatically enable thinking mode unless you set `--thinking off` or define `agents.defaults.models["zai/<model>"].params.thinking` yourself.
Z.AI models enable `tool_stream` by default for tool call streaming. Set `agents.defaults.models["zai/<model>"].params.tool_stream` to `false` to disable it.
### `agents.defaults.cliBackends`

View File

@@ -32,5 +32,7 @@ openclaw onboard --zai-api-key "$ZAI_API_KEY"
## Notes
- GLM models are available as `zai/<model>` (example: `zai/glm-5`).
- `tool_stream` is enabled by default for Z.AI tool-call streaming. Set
`agents.defaults.models["zai/<model>"].params.tool_stream` to `false` to disable it.
- See [/providers/glm](/providers/glm) for the model family overview.
- Z.AI uses Bearer auth with your API key.

View File

@@ -1,4 +1,5 @@
import type { StreamFn } from "@mariozechner/pi-agent-core";
import type { Context, Model, SimpleStreamOptions } from "@mariozechner/pi-ai";
import { describe, expect, it, vi } from "vitest";
import { applyExtraParamsToAgent } from "./extra-params.js";
@@ -10,104 +11,83 @@ vi.mock("@mariozechner/pi-ai", () => ({
})),
}));
/**
 * One tool_stream scenario: the provider/model pair handed to
 * applyExtraParamsToAgent, the model passed to the stream invocation,
 * and optional config/stream options for the run.
 */
interface ToolStreamCase {
  /** Provider id passed to applyExtraParamsToAgent. */
  applyProvider: string;
  /** Model id passed to applyExtraParamsToAgent. */
  applyModelId: string;
  /** Model given to the (possibly wrapped) stream function. */
  model: Model<"openai-completions">;
  /** Optional config forwarded to applyExtraParamsToAgent. */
  cfg?: Parameters<typeof applyExtraParamsToAgent>[1];
  /** Optional options for the stream invocation. */
  options?: SimpleStreamOptions;
}
/**
 * Applies extra params for the given provider/model, invokes the resulting
 * (possibly wrapped) stream function once, and returns the request payload
 * that the onPayload hook observed — mutated in place by any injected params.
 */
function runToolStreamCase(params: ToolStreamCase) {
  const captured: Record<string, unknown> = {
    model: params.model.id,
    messages: [],
  };
  // Stub stream fn: surface the payload to any onPayload hook, return a dummy stream.
  const stubStreamFn: StreamFn = (_model, _context, options) => {
    options?.onPayload?.(captured);
    return {} as ReturnType<StreamFn>;
  };
  const agent = { streamFn: stubStreamFn };
  applyExtraParamsToAgent(agent, params.cfg, params.applyProvider, params.applyModelId);
  const emptyContext: Context = { messages: [] };
  void agent.streamFn?.(params.model, emptyContext, params.options ?? {});
  return captured;
}
describe("extra-params: Z.AI tool_stream support", () => {
it("should inject tool_stream=true for zai provider by default", () => {
const mockStreamFn: StreamFn = vi.fn((model, context, options) => {
// Capture the payload that would be sent
options?.onPayload?.({ model: model.id, messages: [] });
return {
push: vi.fn(),
result: vi.fn().mockResolvedValue({
role: "assistant",
content: [{ type: "text", text: "ok" }],
stopReason: "stop",
}),
} as unknown as ReturnType<StreamFn>;
it("injects tool_stream=true for zai provider by default", () => {
const payload = runToolStreamCase({
applyProvider: "zai",
applyModelId: "glm-5",
model: {
api: "openai-completions",
provider: "zai",
id: "glm-5",
} as Model<"openai-completions">,
});
const agent = { streamFn: mockStreamFn };
const cfg = {
agents: {
defaults: {},
},
};
applyExtraParamsToAgent(
agent,
cfg as unknown as Parameters<typeof applyExtraParamsToAgent>[1],
"zai",
"glm-5",
);
// The streamFn should be wrapped
expect(agent.streamFn).toBeDefined();
expect(agent.streamFn).not.toBe(mockStreamFn);
expect(payload.tool_stream).toBe(true);
});
it("should not inject tool_stream for non-zai providers", () => {
const mockStreamFn: StreamFn = vi.fn(
() =>
({
push: vi.fn(),
result: vi.fn().mockResolvedValue({
role: "assistant",
content: [{ type: "text", text: "ok" }],
stopReason: "stop",
}),
}) as unknown as ReturnType<StreamFn>,
);
it("does not inject tool_stream for non-zai providers", () => {
const payload = runToolStreamCase({
applyProvider: "openai",
applyModelId: "gpt-5",
model: {
api: "openai-completions",
provider: "openai",
id: "gpt-5",
} as Model<"openai-completions">,
});
const agent = { streamFn: mockStreamFn };
const cfg = {};
applyExtraParamsToAgent(
agent,
cfg as unknown as Parameters<typeof applyExtraParamsToAgent>[1],
"anthropic",
"claude-opus-4-6",
);
// Should remain unchanged (except for OpenAI wrapper)
expect(agent.streamFn).toBeDefined();
expect(payload).not.toHaveProperty("tool_stream");
});
it("should allow disabling tool_stream via params", () => {
const mockStreamFn: StreamFn = vi.fn(
() =>
({
push: vi.fn(),
result: vi.fn().mockResolvedValue({
role: "assistant",
content: [{ type: "text", text: "ok" }],
stopReason: "stop",
}),
}) as unknown as ReturnType<StreamFn>,
);
const agent = { streamFn: mockStreamFn };
const cfg = {
agents: {
defaults: {
models: {
"zai/glm-5": {
params: {
tool_stream: false,
it("allows disabling tool_stream via params", () => {
const payload = runToolStreamCase({
applyProvider: "zai",
applyModelId: "glm-5",
model: {
api: "openai-completions",
provider: "zai",
id: "glm-5",
} as Model<"openai-completions">,
cfg: {
agents: {
defaults: {
models: {
"zai/glm-5": {
params: {
tool_stream: false,
},
},
},
},
},
},
};
});
applyExtraParamsToAgent(
agent,
cfg as unknown as Parameters<typeof applyExtraParamsToAgent>[1],
"zai",
"glm-5",
);
// The tool_stream wrapper should be applied but with enabled=false
// In this case, it should just return the underlying streamFn
expect(agent.streamFn).toBeDefined();
expect(payload).not.toHaveProperty("tool_stream");
});
});