From b49b8380a5fbc49acc94db0dda0944dec6e0ba30 Mon Sep 17 00:00:00 2001
From: George Pickett
Date: Sun, 8 Feb 2026 14:04:58 -0800
Subject: [PATCH] test(config): cover maxTokens clamping

---
 src/config/defaults.ts                  |  1 -
 src/config/model-alias-defaults.test.ts | 19 +++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/src/config/defaults.ts b/src/config/defaults.ts
index 7d804a40b5..290cde5699 100644
--- a/src/config/defaults.ts
+++ b/src/config/defaults.ts
@@ -215,7 +215,6 @@ export function applyModelDefaults(cfg: OpenClawConfig): OpenClawConfig {
   }
 
   const defaultMaxTokens = Math.min(DEFAULT_MODEL_MAX_TOKENS, contextWindow);
-  // Clamp maxTokens to contextWindow to prevent invalid configurations
   const rawMaxTokens = isPositiveNumber(raw.maxTokens) ? raw.maxTokens : defaultMaxTokens;
   const maxTokens = Math.min(rawMaxTokens, contextWindow);
   if (raw.maxTokens !== maxTokens) {
diff --git a/src/config/model-alias-defaults.test.ts b/src/config/model-alias-defaults.test.ts
index 82bff85dad..b2db55c570 100644
--- a/src/config/model-alias-defaults.test.ts
+++ b/src/config/model-alias-defaults.test.ts
@@ -80,4 +80,23 @@ describe("applyModelDefaults", () => {
     expect(model?.contextWindow).toBe(DEFAULT_CONTEXT_TOKENS);
     expect(model?.maxTokens).toBe(8192);
   });
+
+  it("clamps maxTokens to contextWindow", () => {
+    const cfg = {
+      models: {
+        providers: {
+          myproxy: {
+            api: "openai-completions",
+            models: [{ id: "gpt-5.2", name: "GPT-5.2", contextWindow: 32768, maxTokens: 40960 }],
+          },
+        },
+      },
+    } satisfies OpenClawConfig;
+
+    const next = applyModelDefaults(cfg);
+    const model = next.models?.providers?.myproxy?.models?.[0];
+
+    expect(model?.contextWindow).toBe(32768);
+    expect(model?.maxTokens).toBe(32768);
+  });
 });
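
Reviewer note: below is a minimal standalone sketch of the clamping rule the new test pins down, not the actual defaults.ts implementation. It assumes DEFAULT_MODEL_MAX_TOKENS is 8192 (consistent with the 8192 expectation in the existing test) and that isPositiveNumber rejects non-finite and non-positive values; the real logic lives in applyModelDefaults in src/config/defaults.ts.

// Sketch only: illustrates the clamping behavior covered by the new test.
// DEFAULT_MODEL_MAX_TOKENS = 8192 is an assumption, not taken from the diff.
const DEFAULT_MODEL_MAX_TOKENS = 8192;

function isPositiveNumber(value: unknown): value is number {
  return typeof value === "number" && Number.isFinite(value) && value > 0;
}

function clampMaxTokens(rawMaxTokens: unknown, contextWindow: number): number {
  // Default to the smaller of the global cap and the window itself.
  const defaultMaxTokens = Math.min(DEFAULT_MODEL_MAX_TOKENS, contextWindow);
  const candidate = isPositiveNumber(rawMaxTokens) ? rawMaxTokens : defaultMaxTokens;
  // A maxTokens larger than the context window is invalid; clamp it down.
  return Math.min(candidate, contextWindow);
}

clampMaxTokens(40960, 32768);    // => 32768, the case the new test asserts
clampMaxTokens(undefined, 32768); // => 8192, the default fallback path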