From eb775ff24b9af4b1a4aa89b72a91f6c4831c06bf Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Wed, 18 Feb 2026 13:12:35 +0000 Subject: [PATCH] test(media): dedupe audio provider request assertions --- .../providers/audio.test-helpers.ts | 34 +++++++++++ .../providers/deepgram/audio.test.ts | 57 +++++++++---------- .../providers/openai/audio.test.ts | 45 ++++++++------- 3 files changed, 84 insertions(+), 52 deletions(-) diff --git a/src/media-understanding/providers/audio.test-helpers.ts b/src/media-understanding/providers/audio.test-helpers.ts index 190465a458..673e087004 100644 --- a/src/media-understanding/providers/audio.test-helpers.ts +++ b/src/media-understanding/providers/audio.test-helpers.ts @@ -1,6 +1,7 @@ import type { MockInstance } from "vitest"; import { afterEach, beforeEach, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; +import { withFetchPreconnect } from "../../test-utils/fetch-mock.js"; export function resolveRequestUrl(input: RequestInfo | URL): string { if (typeof input === "string") { @@ -40,3 +41,36 @@ export function installPinnedHostnameTestHooks(): void { resolvePinnedHostnameWithPolicySpy = null; }); } + +export function createAuthCaptureJsonFetch(responseBody: unknown) { + let seenAuth: string | null = null; + const fetchFn = withFetchPreconnect(async (_input: RequestInfo | URL, init?: RequestInit) => { + const headers = new Headers(init?.headers); + seenAuth = headers.get("authorization"); + return new Response(JSON.stringify(responseBody), { + status: 200, + headers: { "content-type": "application/json" }, + }); + }); + return { + fetchFn, + getAuthHeader: () => seenAuth, + }; +} + +export function createRequestCaptureJsonFetch(responseBody: unknown) { + let seenUrl: string | null = null; + let seenInit: RequestInit | undefined; + const fetchFn = withFetchPreconnect(async (input: RequestInfo | URL, init?: RequestInit) => { + seenUrl = resolveRequestUrl(input); + seenInit = init; + return new Response(JSON.stringify(responseBody), { + status: 200, + headers: { "content-type": "application/json" }, + }); + }); + return { + fetchFn, + getRequest: () => ({ url: seenUrl, init: seenInit }), + }; +} diff --git a/src/media-understanding/providers/deepgram/audio.test.ts b/src/media-understanding/providers/deepgram/audio.test.ts index dab4f9b0fc..08cfac7598 100644 --- a/src/media-understanding/providers/deepgram/audio.test.ts +++ b/src/media-understanding/providers/deepgram/audio.test.ts @@ -1,25 +1,17 @@ import { describe, expect, it } from "vitest"; -import { withFetchPreconnect } from "../../../test-utils/fetch-mock.js"; -import { installPinnedHostnameTestHooks, resolveRequestUrl } from "../audio.test-helpers.js"; +import { + createAuthCaptureJsonFetch, + createRequestCaptureJsonFetch, + installPinnedHostnameTestHooks, +} from "../audio.test-helpers.js"; import { transcribeDeepgramAudio } from "./audio.js"; installPinnedHostnameTestHooks(); describe("transcribeDeepgramAudio", () => { it("respects lowercase authorization header overrides", async () => { - let seenAuth: string | null = null; - const fetchFn = withFetchPreconnect(async (_input: RequestInfo | URL, init?: RequestInit) => { - const headers = new Headers(init?.headers); - seenAuth = headers.get("authorization"); - return new Response( - JSON.stringify({ - results: { channels: [{ alternatives: [{ transcript: "ok" }] }] }, - }), - { - status: 200, - headers: { "content-type": "application/json" }, - }, - ); + const { fetchFn, getAuthHeader } = createAuthCaptureJsonFetch({ + 
+      results: { channels: [{ alternatives: [{ transcript: "ok" }] }] },
     });
 
     const result = await transcribeDeepgramAudio({
@@ -31,25 +23,13 @@ describe("transcribeDeepgramAudio", () => {
       fetchFn,
     });
 
-    expect(seenAuth).toBe("Token override");
+    expect(getAuthHeader()).toBe("Token override");
     expect(result.text).toBe("ok");
   });
 
   it("builds the expected request payload", async () => {
-    let seenUrl: string | null = null;
-    let seenInit: RequestInit | undefined;
-    const fetchFn = withFetchPreconnect(async (input: RequestInfo | URL, init?: RequestInit) => {
-      seenUrl = resolveRequestUrl(input);
-      seenInit = init;
-      return new Response(
-        JSON.stringify({
-          results: { channels: [{ alternatives: [{ transcript: "hello" }] }] },
-        }),
-        {
-          status: 200,
-          headers: { "content-type": "application/json" },
-        },
-      );
+    const { fetchFn, getRequest } = createRequestCaptureJsonFetch({
+      results: { channels: [{ alternatives: [{ transcript: "hello" }] }] },
     });
 
     const result = await transcribeDeepgramAudio({
@@ -68,6 +48,7 @@ describe("transcribeDeepgramAudio", () => {
       },
       fetchFn,
     });
+    const { url: seenUrl, init: seenInit } = getRequest();
 
     expect(result.model).toBe("nova-3");
     expect(result.text).toBe("hello");
@@ -83,4 +64,20 @@ describe("transcribeDeepgramAudio", () => {
     expect(headers.get("content-type")).toBe("audio/wav");
     expect(seenInit?.body).toBeInstanceOf(Uint8Array);
   });
+
+  it("throws when the provider response omits transcript", async () => {
+    const { fetchFn } = createRequestCaptureJsonFetch({
+      results: { channels: [{ alternatives: [{}] }] },
+    });
+
+    await expect(
+      transcribeDeepgramAudio({
+        buffer: Buffer.from("audio-bytes"),
+        fileName: "voice.wav",
+        apiKey: "test-key",
+        timeoutMs: 1234,
+        fetchFn,
+      }),
+    ).rejects.toThrow("Audio transcription response missing transcript");
+  });
 });
diff --git a/src/media-understanding/providers/openai/audio.test.ts b/src/media-understanding/providers/openai/audio.test.ts
index 1e80a9ed52..aeafb6f2ae 100644
--- a/src/media-understanding/providers/openai/audio.test.ts
+++ b/src/media-understanding/providers/openai/audio.test.ts
@@ -1,21 +1,16 @@
 import { describe, expect, it } from "vitest";
-import { withFetchPreconnect } from "../../../test-utils/fetch-mock.js";
-import { installPinnedHostnameTestHooks, resolveRequestUrl } from "../audio.test-helpers.js";
+import {
+  createAuthCaptureJsonFetch,
+  createRequestCaptureJsonFetch,
+  installPinnedHostnameTestHooks,
+} from "../audio.test-helpers.js";
 import { transcribeOpenAiCompatibleAudio } from "./audio.js";
 
 installPinnedHostnameTestHooks();
 
 describe("transcribeOpenAiCompatibleAudio", () => {
   it("respects lowercase authorization header overrides", async () => {
-    let seenAuth: string | null = null;
-    const fetchFn = withFetchPreconnect(async (_input: RequestInfo | URL, init?: RequestInit) => {
-      const headers = new Headers(init?.headers);
-      seenAuth = headers.get("authorization");
-      return new Response(JSON.stringify({ text: "ok" }), {
-        status: 200,
-        headers: { "content-type": "application/json" },
-      });
-    });
+    const { fetchFn, getAuthHeader } = createAuthCaptureJsonFetch({ text: "ok" });
 
     const result = await transcribeOpenAiCompatibleAudio({
       buffer: Buffer.from("audio"),
@@ -26,21 +21,12 @@ describe("transcribeOpenAiCompatibleAudio", () => {
       fetchFn,
     });
 
-    expect(seenAuth).toBe("Bearer override");
+    expect(getAuthHeader()).toBe("Bearer override");
     expect(result.text).toBe("ok");
   });
 
   it("builds the expected request payload", async () => {
-    let seenUrl: string | null = null;
-    let seenInit: RequestInit | undefined;
-    const fetchFn = withFetchPreconnect(async (input: RequestInfo | URL, init?: RequestInit) => {
-      seenUrl = resolveRequestUrl(input);
-      seenInit = init;
-      return new Response(JSON.stringify({ text: "hello" }), {
-        status: 200,
-        headers: { "content-type": "application/json" },
-      });
-    });
+    const { fetchFn, getRequest } = createRequestCaptureJsonFetch({ text: "hello" });
 
     const result = await transcribeOpenAiCompatibleAudio({
       buffer: Buffer.from("audio-bytes"),
@@ -55,6 +41,7 @@ describe("transcribeOpenAiCompatibleAudio", () => {
       headers: { "X-Custom": "1" },
       fetchFn,
     });
+    const { url: seenUrl, init: seenInit } = getRequest();
 
     expect(result.model).toBe("gpt-4o-mini-transcribe");
     expect(result.text).toBe("hello");
@@ -80,4 +67,18 @@ describe("transcribeOpenAiCompatibleAudio", () => {
       }
     }
   });
+
+  it("throws when the provider response omits text", async () => {
+    const { fetchFn } = createRequestCaptureJsonFetch({});
+
+    await expect(
+      transcribeOpenAiCompatibleAudio({
+        buffer: Buffer.from("audio-bytes"),
+        fileName: "voice.wav",
+        apiKey: "test-key",
+        timeoutMs: 1234,
+        fetchFn,
+      }),
+    ).rejects.toThrow("Audio transcription response missing text");
+  });
 });