From 6d451c8205d9162b1fa60325e02a715c3492939b Mon Sep 17 00:00:00 2001
From: Sebastian <19554889+sebslight@users.noreply.github.com>
Date: Mon, 16 Feb 2026 20:20:25 -0500
Subject: [PATCH] test(ollama): add reasoning fallback regression coverage

---
 src/agents/ollama-stream.test.ts | 62 ++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/src/agents/ollama-stream.test.ts b/src/agents/ollama-stream.test.ts
index 1589f2f25c..177f1d0173 100644
--- a/src/agents/ollama-stream.test.ts
+++ b/src/agents/ollama-stream.test.ts
@@ -104,6 +104,22 @@ describe("buildAssistantMessage", () => {
     expect(result.usage.totalTokens).toBe(15);
   });
 
+  it("falls back to reasoning when content is empty", () => {
+    const response = {
+      model: "qwen3:32b",
+      created_at: "2026-01-01T00:00:00Z",
+      message: {
+        role: "assistant" as const,
+        content: "",
+        reasoning: "Reasoning output",
+      },
+      done: true,
+    };
+    const result = buildAssistantMessage(response, modelInfo);
+    expect(result.stopReason).toBe("stop");
+    expect(result.content).toEqual([{ type: "text", text: "Reasoning output" }]);
+  });
+
   it("builds response with tool calls", () => {
     const response = {
       model: "qwen3:32b",
@@ -287,4 +303,50 @@ describe("createOllamaStreamFn", () => {
       globalThis.fetch = originalFetch;
     }
   });
+
+  it("accumulates reasoning chunks when content is empty", async () => {
+    const originalFetch = globalThis.fetch;
+    const fetchMock = vi.fn(async () => {
+      const payload = [
+        '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}',
+        '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}',
+        '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
+      ].join("\n");
+      return new Response(`${payload}\n`, {
+        status: 200,
+        headers: { "Content-Type": "application/x-ndjson" },
+      });
+    });
+    globalThis.fetch = fetchMock as unknown as typeof fetch;
+
+    try {
+      const streamFn = createOllamaStreamFn("http://ollama-host:11434");
+      const stream = streamFn(
+        {
+          id: "qwen3:32b",
+          api: "ollama",
+          provider: "custom-ollama",
+          contextWindow: 131072,
+        } as unknown as Parameters<typeof streamFn>[0],
+        {
+          messages: [{ role: "user", content: "hello" }],
+        } as unknown as Parameters<typeof streamFn>[1],
+        {} as unknown as Parameters<typeof streamFn>[2],
+      );
+
+      const events = [];
+      for await (const event of stream) {
+        events.push(event);
+      }
+
+      const doneEvent = events.at(-1);
+      if (!doneEvent || doneEvent.type !== "done") {
+        throw new Error("Expected done event");
+      }
+
+      expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]);
+    } finally {
+      globalThis.fetch = originalFetch;
+    }
+  });
 });
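
Note: both tests pin down the same content-empty fallback, once in
buildAssistantMessage and once after the stream accumulator has joined the
per-chunk `reasoning` deltas (which is why the second test expects
"reasoned output" from the two partial chunks). A minimal sketch of the
behavior under test, assuming a helper roughly along these lines exists in
ollama-stream.ts; the name resolveAssistantText and its signature are
hypothetical, not the repo's real API:

    // Hypothetical helper illustrating the fallback these tests cover.
    // Assumption: Ollama chat responses may carry an optional `reasoning`
    // string alongside `content` (the tests use qwen3:32b responses).
    function resolveAssistantText(content: string, reasoning?: string): string {
      // Prefer real content; fall back to the (accumulated) reasoning text
      // only when the model emitted no content at all.
      return content !== "" ? content : (reasoning ?? "");
    }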