diff --git a/src/agents/agent-scope.test.ts b/src/agents/agent-scope.test.ts
index 585c7bca90..bc06bde9ce 100644
--- a/src/agents/agent-scope.test.ts
+++ b/src/agents/agent-scope.test.ts
@@ -57,7 +57,7 @@ describe("resolveAgentConfig", () => {
       defaults: {
         model: {
           primary: "anthropic/claude-sonnet-4",
-          fallbacks: ["openai/gpt-4.1"],
+          fallbacks: ["openai/gpt-5-nano"],
         },
       },
       list: [
diff --git a/src/agents/model-fallback.test.ts b/src/agents/model-fallback.test.ts
index ea846eabbe..dbffd54810 100644
--- a/src/agents/model-fallback.test.ts
+++ b/src/agents/model-fallback.test.ts
@@ -8,7 +8,7 @@ function makeCfg(overrides: Partial = {}): ClawdbotConfig {
     agents: {
       defaults: {
         model: {
-          primary: "openai/gpt-4.1-mini",
+          primary: "openai/gpt-5-nano",
          fallbacks: ["anthropic/claude-haiku-3-5"],
        },
      },
@@ -29,7 +29,7 @@ describe("runWithModelFallback", () => {
      runWithModelFallback({
        cfg,
        provider: "openai",
-        model: "gpt-4.1-mini",
+        model: "gpt-5-nano",
        run,
      }),
    ).rejects.toThrow("bad request");
@@ -46,7 +46,7 @@ describe("runWithModelFallback", () => {
    const result = await runWithModelFallback({
      cfg,
      provider: "openai",
-      model: "gpt-4.1-mini",
+      model: "gpt-5-nano",
      run,
    });
@@ -68,7 +68,7 @@ describe("runWithModelFallback", () => {
    const result = await runWithModelFallback({
      cfg,
      provider: "openai",
-      model: "gpt-4.1-mini",
+      model: "gpt-5-nano",
      run,
    });
@@ -92,7 +92,7 @@ describe("runWithModelFallback", () => {
    const result = await runWithModelFallback({
      cfg,
      provider: "openai",
-      model: "gpt-4.1-mini",
+      model: "gpt-5-nano",
      run,
    });
@@ -129,7 +129,7 @@ describe("runWithModelFallback", () => {
      agents: {
        defaults: {
          model: {
-            primary: "openai/gpt-4.1-mini",
+            primary: "openai/gpt-5-nano",
          },
        },
      },
@@ -173,13 +173,13 @@ describe("runWithModelFallback", () => {
      cfg,
      provider: "anthropic",
      model: "claude-opus-4-5",
-      fallbacksOverride: ["openai/gpt-4.1"],
+      fallbacksOverride: ["openai/gpt-5-nano"],
      run: async (provider, model) => {
        calls.push({ provider, model });
        if (provider === "anthropic") {
          throw Object.assign(new Error("nope"), { status: 401 });
        }
-        if (provider === "openai" && model === "gpt-4.1") {
+        if (provider === "openai" && model === "gpt-5-nano") {
          return "ok";
        }
        throw new Error(`unexpected candidate: ${provider}/${model}`);
@@ -189,7 +189,7 @@ describe("runWithModelFallback", () => {
    expect(res.result).toBe("ok");
    expect(calls).toEqual([
      { provider: "anthropic", model: "claude-opus-4-5" },
-      { provider: "openai", model: "gpt-4.1" },
+      { provider: "openai", model: "gpt-5-nano" },
    ]);
  });
@@ -234,7 +234,7 @@ describe("runWithModelFallback", () => {
    const result = await runWithModelFallback({
      cfg,
      provider: "openai",
-      model: "gpt-4.1-mini",
+      model: "gpt-5-nano",
      run,
    });
@@ -254,7 +254,7 @@ describe("runWithModelFallback", () => {
    const result = await runWithModelFallback({
      cfg,
      provider: "openai",
-      model: "gpt-4.1-mini",
+      model: "gpt-5-nano",
      run,
    });
@@ -269,7 +269,7 @@ describe("runWithModelFallback", () => {
      agents: {
        defaults: {
          model: {
-            primary: "openai/gpt-4.1-mini",
+            primary: "openai/gpt-5-nano",
            fallbacks: [],
          },
        },
@@ -292,6 +292,6 @@ describe("runWithModelFallback", () => {
    expect(result.result).toBe("ok");
    expect(run).toHaveBeenCalledTimes(2);
    expect(result.provider).toBe("openai");
-    expect(result.model).toBe("gpt-4.1-mini");
+    expect(result.model).toBe("gpt-5-nano");
  });
});
diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts
index c054d64b87..73e59da276 100644
--- a/src/agents/model-selection.test.ts
+++ b/src/agents/model-selection.test.ts
@@ -13,8 +13,8 @@ import {
 const catalog = [
   {
     provider: "openai",
-    id: "gpt-4",
-    name: "GPT-4",
+    id: "gpt-5-nano",
+    name: "GPT-5 Nano",
   },
 ];
@@ -24,7 +24,7 @@ describe("buildAllowedModelSet", () => {
      agents: {
        defaults: {
          models: {
-            "openai/gpt-4": { alias: "gpt4" },
+            "openai/gpt-5-nano": { alias: "gpt5nano" },
          },
        },
      },
@@ -38,7 +38,9 @@ describe("buildAllowedModelSet", () => {
    });

    expect(allowed.allowAny).toBe(false);
-    expect(allowed.allowedKeys.has(modelKey("openai", "gpt-4"))).toBe(true);
+    expect(allowed.allowedKeys.has(modelKey("openai", "gpt-5-nano"))).toBe(
+      true,
+    );
    expect(allowed.allowedKeys.has(modelKey("claude-cli", "opus-4.5"))).toBe(
      true,
    );
@@ -57,7 +59,9 @@ describe("buildAllowedModelSet", () => {
    });

    expect(allowed.allowAny).toBe(true);
-    expect(allowed.allowedKeys.has(modelKey("openai", "gpt-4"))).toBe(true);
+    expect(allowed.allowedKeys.has(modelKey("openai", "gpt-5-nano"))).toBe(
+      true,
+    );
    expect(allowed.allowedKeys.has(modelKey("claude-cli", "opus-4.5"))).toBe(
      true,
    );
@@ -236,7 +240,7 @@ describe("resolveAllowedModelRef", () => {
      agents: {
        defaults: {
          models: {
-            "openai/gpt-4": { alias: "GPT4" },
+            "openai/gpt-5-nano": { alias: "GPT5NANO" },
          },
        },
      },
@@ -244,12 +248,12 @@ describe("resolveAllowedModelRef", () => {
    const resolved = resolveAllowedModelRef({
      cfg,
      catalog: [
-        { provider: "openai", id: "gpt-4", name: "GPT-4" },
+        { provider: "openai", id: "gpt-5-nano", name: "GPT-5 Nano" },
        { provider: "anthropic", id: "claude-sonnet-4-1", name: "Sonnet" },
      ],
      raw: "anthropic/claude-sonnet-4-1",
      defaultProvider: "openai",
-      defaultModel: "gpt-4",
+      defaultModel: "gpt-5-nano",
    });
    expect(resolved).toEqual({
      error: "model not allowed: anthropic/claude-sonnet-4-1",
diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts b/src/agents/pi-embedded-runner-extraparams.test.ts
index d1243da61f..b9e2a8d584 100644
--- a/src/agents/pi-embedded-runner-extraparams.test.ts
+++ b/src/agents/pi-embedded-runner-extraparams.test.ts
@@ -207,11 +207,11 @@ describe("resolveExtraParams", () => {
    expect(result).toBeUndefined();
  });

-  it("returns undefined for openai/gpt-4 with no config", () => {
+  it("returns undefined for openai/gpt-5-nano with no config", () => {
    const result = resolveExtraParams({
      cfg: undefined,
      provider: "openai",
-      modelId: "gpt-4",
+      modelId: "gpt-5-nano",
    });

    expect(result).toBeUndefined();
@@ -223,7 +223,7 @@ describe("resolveExtraParams", () => {
      agents: {
        defaults: {
          models: {
-            "openai/gpt-4": {
+            "openai/gpt-5-nano": {
              params: {
                logprobs: true,
                top_logprobs: 5,
@@ -234,7 +234,7 @@ describe("resolveExtraParams", () => {
          },
        },
      },
      provider: "openai",
-      modelId: "gpt-4",
+      modelId: "gpt-5-nano",
    });

    expect(result).toEqual({
diff --git a/src/auto-reply/reply.block-streaming.test.ts b/src/auto-reply/reply.block-streaming.test.ts
index 9a13e65cca..2ffdb1843e 100644
--- a/src/auto-reply/reply.block-streaming.test.ts
+++ b/src/auto-reply/reply.block-streaming.test.ts
@@ -42,7 +42,7 @@ describe("block streaming", () => {
    piEmbeddedMock.runEmbeddedPiAgent.mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
-      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
+      { id: "gpt-5-nano", name: "GPT-5 Nano", provider: "openai" },
    ]);
  });
diff --git a/src/auto-reply/reply.directive.test.ts b/src/auto-reply/reply.directive.test.ts
index fc499fd02b..efba1748f8 100644
--- a/src/auto-reply/reply.directive.test.ts
+++ b/src/auto-reply/reply.directive.test.ts
@@ -61,7 +61,7 @@ describe("directive behavior", () => {
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
-      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
+      { id: "gpt-5-nano", name: "GPT-5 Nano", provider: "openai" },
    ]);
  });
@@ -143,7 +143,7 @@ describe("directive behavior", () => {
      {
        agents: {
          defaults: {
-            model: "openai/gpt-4.1-mini",
+            model: "openai/gpt-5-nano",
            workspace: path.join(home, "clawd"),
          },
        },
@@ -1655,7 +1655,7 @@ describe("directive behavior", () => {
            workspace: path.join(home, "clawd"),
            models: {
              "anthropic/claude-opus-4-5": {},
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
            },
          },
        },
@@ -1666,7 +1666,7 @@ describe("directive behavior", () => {
    const text = Array.isArray(res) ? res[0]?.text : res?.text;
    expect(text).toContain("anthropic/claude-opus-4-5");
    expect(text).toContain("Pick: /model <#> or /model ");
-    expect(text).toContain("gpt-4.1-mini — openai");
+    expect(text).toContain("gpt-5-nano — openai");
    expect(text).not.toContain("claude-sonnet-4-1");
    expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
  });
@@ -1687,7 +1687,7 @@ describe("directive behavior", () => {
            workspace: path.join(home, "clawd"),
            models: {
              "anthropic/claude-opus-4-5": {},
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
            },
          },
        },
@@ -1697,7 +1697,7 @@ describe("directive behavior", () => {
    const text = Array.isArray(res) ? res[0]?.text : res?.text;
    expect(text).toContain("anthropic/claude-opus-4-5");
-    expect(text).toContain("openai/gpt-4.1-mini");
+    expect(text).toContain("openai/gpt-5-nano");
    expect(text).not.toContain("claude-sonnet-4-1");
    expect(text).toContain("auth:");
    expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
@@ -1719,7 +1719,7 @@ describe("directive behavior", () => {
            workspace: path.join(home, "clawd"),
            models: {
              "anthropic/claude-opus-4-5": {},
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
            },
          },
        },
@@ -1730,7 +1730,7 @@ describe("directive behavior", () => {
    const text = Array.isArray(res) ? res[0]?.text : res?.text;
    expect(text).toContain("Pick: /model <#> or /model ");
    expect(text).toContain("claude-opus-4-5 — anthropic");
-    expect(text).toContain("gpt-4.1-mini — openai");
+    expect(text).toContain("gpt-5-nano — openai");
    expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
  });
});
@@ -1751,7 +1751,7 @@ describe("directive behavior", () => {
            workspace: path.join(home, "clawd"),
            models: {
              "anthropic/claude-opus-4-5": {},
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
            },
          },
        },
@@ -1762,7 +1762,7 @@ describe("directive behavior", () => {
    const text = Array.isArray(res) ? res[0]?.text : res?.text;
    expect(text).toContain("Pick: /model <#> or /model ");
    expect(text).toContain("claude-opus-4-5 — anthropic");
-    expect(text).toContain("gpt-4.1-mini — openai");
+    expect(text).toContain("gpt-5-nano — openai");
    expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
  });
});
@@ -1778,7 +1778,7 @@ describe("directive behavior", () => {
        id: "claude-opus-4-5",
        name: "Claude Opus 4.5",
      },
-      { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
+      { provider: "openai", id: "gpt-5-nano", name: "GPT-5 Nano" },
    ]);

    const storePath = path.join(home, "sessions.json");
@@ -1792,7 +1792,7 @@ describe("directive behavior", () => {
            workspace: path.join(home, "clawd"),
            models: {
              "anthropic/claude-opus-4-5": {},
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
              "minimax/MiniMax-M2.1": { alias: "minimax" },
            },
          },
@@ -1813,7 +1813,7 @@ describe("directive behavior", () => {
    const text = Array.isArray(res) ? res[0]?.text : res?.text;
    expect(text).toContain("claude-opus-4-5 — anthropic");
-    expect(text).toContain("gpt-4.1-mini — openai");
+    expect(text).toContain("gpt-5-nano — openai");
    expect(text).toContain("MiniMax-M2.1 — minimax");
    expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
  });
@@ -1853,7 +1853,7 @@ describe("directive behavior", () => {
    const storePath = path.join(home, "sessions.json");

    await getReplyFromConfig(
-      { Body: "/model openai/gpt-4.1-mini", From: "+1222", To: "+1222" },
+      { Body: "/model openai/gpt-5-nano", From: "+1222", To: "+1222" },
      {},
      {
        agents: {
@@ -1862,7 +1862,7 @@ describe("directive behavior", () => {
            workspace: path.join(home, "clawd"),
            models: {
              "anthropic/claude-opus-4-5": {},
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
            },
          },
        },
@@ -1871,7 +1871,7 @@ describe("directive behavior", () => {
    );

    assertModelSelection(storePath, {
-      model: "gpt-4.1-mini",
+      model: "gpt-5-nano",
      provider: "openai",
    });
    expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
@@ -1889,10 +1889,10 @@ describe("directive behavior", () => {
      {
        agents: {
          defaults: {
-            model: { primary: "openai/gpt-4.1-mini" },
+            model: { primary: "openai/gpt-5-nano" },
            workspace: path.join(home, "clawd"),
            models: {
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
              "anthropic/claude-opus-4-5": { alias: "Opus" },
            },
          },
@@ -2209,10 +2209,10 @@ describe("directive behavior", () => {
      {
        agents: {
          defaults: {
-            model: { primary: "openai/gpt-4.1-mini" },
+            model: { primary: "openai/gpt-5-nano" },
            workspace: path.join(home, "clawd"),
            models: {
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
              "anthropic/claude-opus-4-5": { alias: "Opus" },
            },
          },
@@ -2242,10 +2242,10 @@ describe("directive behavior", () => {
      {
        agents: {
          defaults: {
-            model: { primary: "openai/gpt-4.1-mini" },
+            model: { primary: "openai/gpt-5-nano" },
            workspace: path.join(home, "clawd"),
            models: {
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
              "anthropic/claude-opus-4-5": { alias: "Opus" },
            },
          },
@@ -2278,7 +2278,7 @@ describe("directive behavior", () => {
      {
        agents: {
          defaults: {
-            model: { primary: "openai/gpt-4.1-mini" },
+            model: { primary: "openai/gpt-5-nano" },
            workspace: path.join(home, "clawd"),
          },
        },
@@ -2309,7 +2309,7 @@ describe("directive behavior", () => {
      {
        agents: {
          defaults: {
-            model: { primary: "openai/gpt-4.1-mini" },
+            model: { primary: "openai/gpt-5-nano" },
            workspace: path.join(home, "clawd"),
          },
        },
@@ -2336,7 +2336,7 @@ describe("directive behavior", () => {
    const res = await getReplyFromConfig(
      {
-        Body: "please sync /model openai/gpt-4.1-mini now",
+        Body: "please sync /model openai/gpt-5-nano now",
        From: "+1004",
        To: "+2000",
      },
@@ -2348,7 +2348,7 @@ describe("directive behavior", () => {
            workspace: path.join(home, "clawd"),
            models: {
              "anthropic/claude-opus-4-5": {},
-              "openai/gpt-4.1-mini": {},
+              "openai/gpt-5-nano": {},
            },
          },
        },
diff --git a/src/auto-reply/reply.triggers.test.ts b/src/auto-reply/reply.triggers.test.ts
index 9925b27bf0..5cf6f20703 100644
--- a/src/auto-reply/reply.triggers.test.ts
+++ b/src/auto-reply/reply.triggers.test.ts
@@ -41,7 +41,7 @@ const modelCatalogMocks = vi.hoisted(() => ({
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
-    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
+    { provider: "openai", id: "gpt-5-nano", name: "GPT-5 Nano" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
@@ -320,7 +320,7 @@ describe("trigger handling", () => {
    const res = await getReplyFromConfig(
      {
-        Body: "/model openai/gpt-4.1-mini",
+        Body: "/model openai/gpt-5-nano",
        From: "telegram:111",
        To: "telegram:111",
        ChatType: "direct",
@@ -336,11 +336,11 @@ describe("trigger handling", () => {
    );

    const text = Array.isArray(res) ? res[0]?.text : res?.text;
-    expect(text).toContain("Model set to openai/gpt-4.1-mini");
+    expect(text).toContain("Model set to openai/gpt-5-nano");

    const store = loadSessionStore(cfg.session.store);
    expect(store[targetSessionKey]?.providerOverride).toBe("openai");
-    expect(store[targetSessionKey]?.modelOverride).toBe("gpt-4.1-mini");
+    expect(store[targetSessionKey]?.modelOverride).toBe("gpt-5-nano");
    expect(store[slashSessionKey]).toBeUndefined();

    vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
@@ -368,7 +368,7 @@ describe("trigger handling", () => {
    expect(vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]).toEqual(
      expect.objectContaining({
        provider: "openai",
-        model: "gpt-4.1-mini",
+        model: "gpt-5-nano",
      }),
    );
  });
diff --git a/src/auto-reply/status.test.ts b/src/auto-reply/status.test.ts
index 9273e187fb..32cc8e4745 100644
--- a/src/auto-reply/status.test.ts
+++ b/src/auto-reply/status.test.ts
@@ -122,7 +122,7 @@ describe("buildStatusMessage", () => {
      sessionId: "override-1",
      updatedAt: 0,
      providerOverride: "openai",
-      modelOverride: "gpt-4.1-mini",
+      modelOverride: "gpt-5-nano",
      modelProvider: "anthropic",
      model: "claude-haiku-4-5",
      contextTokens: 32_000,
@@ -133,7 +133,7 @@ describe("buildStatusMessage", () => {
      modelAuth: "api-key",
    });

-    expect(normalizeTestText(text)).toContain("Model: openai/gpt-4.1-mini");
+    expect(normalizeTestText(text)).toContain("Model: openai/gpt-5-nano");
  });

  it("keeps provider prefix from configured model", () => {
diff --git a/src/auto-reply/thinking.test.ts b/src/auto-reply/thinking.test.ts
index 6bc85a93ba..74d52a3ba9 100644
--- a/src/auto-reply/thinking.test.ts
+++ b/src/auto-reply/thinking.test.ts
@@ -25,9 +25,7 @@ describe("listThinkingLevels", () => {
  });

  it("excludes xhigh for non-codex models", () => {
-    expect(listThinkingLevels(undefined, "gpt-4.1-mini")).not.toContain(
-      "xhigh",
-    );
+    expect(listThinkingLevels(undefined, "gpt-5-nano")).not.toContain("xhigh");
  });
});
diff --git a/src/commands/agent.test.ts b/src/commands/agent.test.ts
index 3f66c78a76..75294ee793 100644
--- a/src/commands/agent.test.ts
+++ b/src/commands/agent.test.ts
@@ -152,10 +152,10 @@ describe("agentCommand", () => {
    await withTempHome(async (home) => {
      const store = path.join(home, "sessions.json");
      mockConfig(home, store, {
-        model: { primary: "openai/gpt-4.1-mini" },
+        model: { primary: "openai/gpt-5-nano" },
        models: {
          "anthropic/claude-opus-4-5": {},
-          "openai/gpt-4.1-mini": {},
+          "openai/gpt-5-nano": {},
        },
      });
@@ -163,7 +163,7 @@ describe("agentCommand", () => {
      const callArgs = vi.mocked(runEmbeddedPiAgent).mock.calls.at(-1)?.[0];
      expect(callArgs?.provider).toBe("openai");
-      expect(callArgs?.model).toBe("gpt-4.1-mini");
+      expect(callArgs?.model).toBe("gpt-5-nano");
    });
  });
diff --git a/src/commands/agents.test.ts b/src/commands/agents.test.ts
index 64ea85501b..aece00dcef 100644
--- a/src/commands/agents.test.ts
+++ b/src/commands/agents.test.ts
@@ -27,7 +27,7 @@ describe("agents helpers", () => {
        name: "Work",
        workspace: "/work-ws",
        agentDir: "/state/agents/work/agent",
-        model: "openai/gpt-4.1",
+        model: "openai/gpt-5-nano",
      },
    ],
  },
diff --git a/src/commands/models.list.test.ts b/src/commands/models.list.test.ts
index 3a5b34ef05..76c0b0847c 100644
--- a/src/commands/models.list.test.ts
+++ b/src/commands/models.list.test.ts
@@ -157,8 +157,8 @@ describe("models list/status", () => {
      },
      {
        provider: "openai",
-        id: "gpt-4.1-mini",
-        name: "GPT-4.1 mini",
+        id: "gpt-5-nano",
+        name: "GPT-5 Nano",
        input: ["text"],
        baseUrl: "https://api.openai.com/v1",
        contextWindow: 128000,
@@ -199,8 +199,8 @@ describe("models list/status", () => {
      },
      {
        provider: "openai",
-        id: "gpt-4.1-mini",
-        name: "GPT-4.1 mini",
+        id: "gpt-5-nano",
+        name: "GPT-5 Nano",
        input: ["text"],
        baseUrl: "https://api.openai.com/v1",
        contextWindow: 128000,
@@ -241,8 +241,8 @@ describe("models list/status", () => {
      },
      {
        provider: "openai",
-        id: "gpt-4.1-mini",
-        name: "GPT-4.1 mini",
+        id: "gpt-5-nano",
+        name: "GPT-5 Nano",
        input: ["text"],
        baseUrl: "https://api.openai.com/v1",
        contextWindow: 128000,
diff --git a/src/config/config.test.ts b/src/config/config.test.ts
index a42e08cf44..2b0447cac0 100644
--- a/src/config/config.test.ts
+++ b/src/config/config.test.ts
@@ -1603,10 +1603,10 @@ describe("legacy config detection", () => {
    const res = migrateLegacyConfig({
      agent: {
        model: "anthropic/claude-opus-4-5",
-        modelFallbacks: ["openai/gpt-4.1-mini"],
-        imageModel: "openai/gpt-4.1-mini",
+        modelFallbacks: ["openai/gpt-5-nano"],
+        imageModel: "openai/gpt-5-nano",
        imageModelFallbacks: ["anthropic/claude-opus-4-5"],
-        allowedModels: ["anthropic/claude-opus-4-5", "openai/gpt-4.1-mini"],
+        allowedModels: ["anthropic/claude-opus-4-5", "openai/gpt-5-nano"],
        modelAliases: { Opus: "anthropic/claude-opus-4-5" },
      },
    });
@@ -1615,10 +1615,10 @@ describe("legacy config detection", () => {
      "anthropic/claude-opus-4-5",
    );
    expect(res.config?.agents?.defaults?.model?.fallbacks).toEqual([
-      "openai/gpt-4.1-mini",
+      "openai/gpt-5-nano",
    ]);
    expect(res.config?.agents?.defaults?.imageModel?.primary).toBe(
-      "openai/gpt-4.1-mini",
+      "openai/gpt-5-nano",
    );
    expect(res.config?.agents?.defaults?.imageModel?.fallbacks).toEqual([
      "anthropic/claude-opus-4-5",
@@ -1627,7 +1627,7 @@ describe("legacy config detection", () => {
      res.config?.agents?.defaults?.models?.["anthropic/claude-opus-4-5"],
    ).toMatchObject({ alias: "Opus" });
    expect(
-      res.config?.agents?.defaults?.models?.["openai/gpt-4.1-mini"],
+      res.config?.agents?.defaults?.models?.["openai/gpt-5-nano"],
    ).toBeTruthy();
    expect(res.config?.agent).toBeUndefined();
  });
diff --git a/src/cron/isolated-agent.test.ts b/src/cron/isolated-agent.test.ts
index 713bfd4d71..1c579bc48e 100644
--- a/src/cron/isolated-agent.test.ts
+++ b/src/cron/isolated-agent.test.ts
@@ -213,7 +213,7 @@ describe("runCronIsolatedAgentTurn", () => {
      job: makeJob({
        kind: "agentTurn",
        message: "do it",
-        model: "openai/gpt-4.1-mini",
+        model: "openai/gpt-5-nano",
      }),
      message: "do it",
      sessionKey: "cron:job-1",
@@ -226,7 +226,7 @@ describe("runCronIsolatedAgentTurn", () => {
      model?: string;
    };
    expect(call?.provider).toBe("openai");
-    expect(call?.model).toBe("gpt-4.1-mini");
+    expect(call?.model).toBe("gpt-5-nano");
  });
});
diff --git a/src/gateway/hooks-mapping.test.ts b/src/gateway/hooks-mapping.test.ts
index 2a013e0fb1..d1b497ad2e 100644
--- a/src/gateway/hooks-mapping.test.ts
+++ b/src/gateway/hooks-mapping.test.ts
@@ -46,7 +46,7 @@ describe("hooks mapping", () => {
        match: { path: "gmail" },
        action: "agent",
        messageTemplate: "Subject: {{messages[0].subject}}",
-        model: "openai/gpt-4.1-mini",
+        model: "openai/gpt-5-nano",
      },
    ],
  });
@@ -58,7 +58,7 @@ describe("hooks mapping", () => {
    });
    expect(result?.ok).toBe(true);
    if (result?.ok && result.action.kind === "agent") {
-      expect(result.action.model).toBe("openai/gpt-4.1-mini");
+      expect(result.action.model).toBe("openai/gpt-5-nano");
    }
  });
diff --git a/src/gateway/server.hooks.test.ts b/src/gateway/server.hooks.test.ts
index 07b863062f..1ec642baaa 100644
--- a/src/gateway/server.hooks.test.ts
+++ b/src/gateway/server.hooks.test.ts
@@ -88,7 +88,7 @@ describe("gateway server hooks", () => {
      body: JSON.stringify({
        message: "Do it",
        name: "Email",
-        model: "openai/gpt-4.1-mini",
+        model: "openai/gpt-5-nano",
      }),
    });
    expect(res.status).toBe(202);
@@ -96,7 +96,7 @@ describe("gateway server hooks", () => {
    const call = cronIsolatedRun.mock.calls[0]?.[0] as {
      job?: { payload?: { model?: string } };
    };
-    expect(call?.job?.payload?.model).toBe("openai/gpt-4.1-mini");
+    expect(call?.job?.payload?.model).toBe("openai/gpt-5-nano");
    drainSystemEvents(resolveMainKey());
    await server.close();
  });