diff --git a/src/agents/model-catalog.e2e.test.ts b/src/agents/model-catalog.e2e.test.ts
index 3e90d8ee48..b0702641f2 100644
--- a/src/agents/model-catalog.e2e.test.ts
+++ b/src/agents/model-catalog.e2e.test.ts
@@ -16,7 +16,7 @@ vi.mock("./agent-paths.js", () => ({
   resolveOpenClawAgentDir: () => "/tmp/openclaw",
 }));
 
-describe("loadModelCatalog", () => {
+describe("loadModelCatalog e2e smoke", () => {
   beforeEach(() => {
     resetModelCatalogCacheForTest();
   });
@@ -27,10 +27,8 @@ describe("loadModelCatalog", () => {
     vi.restoreAllMocks();
   });
 
-  it("retries after import failure without poisoning the cache", async () => {
-    const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
+  it("recovers after an import failure on the next load", async () => {
     let call = 0;
-
     __setModelCatalogImportForTest(async () => {
       call += 1;
       if (call === 1) {
@@ -47,41 +45,9 @@ describe("loadModelCatalog", () => {
     });
 
     const cfg = {} as OpenClawConfig;
-    const first = await loadModelCatalog({ config: cfg });
-    expect(first).toEqual([]);
-
-    const second = await loadModelCatalog({ config: cfg });
-    expect(second).toEqual([{ id: "gpt-4.1", name: "GPT-4.1", provider: "openai" }]);
-    expect(call).toBe(2);
-    expect(warnSpy).toHaveBeenCalledTimes(1);
-  });
-
-  it("returns partial results on discovery errors", async () => {
-    const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
-
-    __setModelCatalogImportForTest(
-      async () =>
-        ({
-          AuthStorage: class {},
-          ModelRegistry: class {
-            getAll() {
-              return [
-                { id: "gpt-4.1", name: "GPT-4.1", provider: "openai" },
-                {
-                  get id() {
-                    throw new Error("boom");
-                  },
-                  provider: "openai",
-                  name: "bad",
-                },
-              ];
-            }
-          },
-        }) as unknown as PiSdkModule,
-    );
-
-    const result = await loadModelCatalog({ config: {} as OpenClawConfig });
-    expect(result).toEqual([{ id: "gpt-4.1", name: "GPT-4.1", provider: "openai" }]);
-    expect(warnSpy).toHaveBeenCalledTimes(1);
+    expect(await loadModelCatalog({ config: cfg })).toEqual([]);
+    expect(await loadModelCatalog({ config: cfg })).toEqual([
+      { id: "gpt-4.1", name: "GPT-4.1", provider: "openai" },
+    ]);
   });
 });
diff --git a/src/agents/pi-embedded-runner/model.e2e.test.ts b/src/agents/pi-embedded-runner/model.e2e.test.ts
index 5f9ba96a69..3d176ccafa 100644
--- a/src/agents/pi-embedded-runner/model.e2e.test.ts
+++ b/src/agents/pi-embedded-runner/model.e2e.test.ts
@@ -5,7 +5,6 @@
 vi.mock("../pi-model-discovery.js", () => ({
   discoverModels: vi.fn(() => ({ find: vi.fn(() => null) })),
 }));
-import type { OpenClawConfig } from "../../config/config.js";
 import { discoverModels } from "../pi-model-discovery.js";
 import { buildInlineProviderModels, resolveModel } from "./model.js";
 
@@ -25,117 +24,27 @@ beforeEach(() => {
   } as unknown as ReturnType<typeof discoverModels>);
 });
 
-describe("buildInlineProviderModels", () => {
-  it("attaches provider ids to inline models", () => {
+describe("pi embedded model e2e smoke", () => {
+  it("attaches provider ids and provider-level baseUrl for inline models", () => {
     const providers = {
-      " alpha ": { baseUrl: "http://alpha.local", models: [makeModel("alpha-model")] },
-      beta: { baseUrl: "http://beta.local", models: [makeModel("beta-model")] },
+      custom: {
+        baseUrl: "http://localhost:8000",
+        models: [makeModel("custom-model")],
+      },
     };
 
     const result = buildInlineProviderModels(providers);
-
     expect(result).toEqual([
       {
-        ...makeModel("alpha-model"),
-        provider: "alpha",
-        baseUrl: "http://alpha.local",
-        api: undefined,
-      },
-      {
-        ...makeModel("beta-model"),
-        provider: "beta",
-        baseUrl: "http://beta.local",
"http://beta.local", + ...makeModel("custom-model"), + provider: "custom", + baseUrl: "http://localhost:8000", api: undefined, }, ]); }); - it("inherits baseUrl from provider when model does not specify it", () => { - const providers = { - custom: { - baseUrl: "http://localhost:8000", - models: [makeModel("custom-model")], - }, - }; - - const result = buildInlineProviderModels(providers); - - expect(result).toHaveLength(1); - expect(result[0].baseUrl).toBe("http://localhost:8000"); - }); - - it("inherits api from provider when model does not specify it", () => { - const providers = { - custom: { - baseUrl: "http://localhost:8000", - api: "anthropic-messages", - models: [makeModel("custom-model")], - }, - }; - - const result = buildInlineProviderModels(providers); - - expect(result).toHaveLength(1); - expect(result[0].api).toBe("anthropic-messages"); - }); - - it("model-level api takes precedence over provider-level api", () => { - const providers = { - custom: { - baseUrl: "http://localhost:8000", - api: "openai-responses", - models: [{ ...makeModel("custom-model"), api: "anthropic-messages" as const }], - }, - }; - - const result = buildInlineProviderModels(providers); - - expect(result).toHaveLength(1); - expect(result[0].api).toBe("anthropic-messages"); - }); - - it("inherits both baseUrl and api from provider config", () => { - const providers = { - custom: { - baseUrl: "http://localhost:10000", - api: "anthropic-messages", - models: [makeModel("claude-opus-4.5")], - }, - }; - - const result = buildInlineProviderModels(providers); - - expect(result).toHaveLength(1); - expect(result[0]).toMatchObject({ - provider: "custom", - baseUrl: "http://localhost:10000", - api: "anthropic-messages", - name: "claude-opus-4.5", - }); - }); -}); - -describe("resolveModel", () => { - it("includes provider baseUrl in fallback model", () => { - const cfg = { - models: { - providers: { - custom: { - baseUrl: "http://localhost:9000", - models: [], - }, - }, - }, - } as OpenClawConfig; - - const result = resolveModel("custom", "missing-model", "/tmp/agent", cfg); - - expect(result.model?.baseUrl).toBe("http://localhost:9000"); - expect(result.model?.provider).toBe("custom"); - expect(result.model?.id).toBe("missing-model"); - }); - - it("builds an openai-codex fallback for gpt-5.3-codex", () => { + it("builds an openai-codex forward-compat fallback for gpt-5.3-codex", () => { const templateModel = { id: "gpt-5.2-codex", name: "GPT-5.2 Codex", @@ -148,7 +57,6 @@ describe("resolveModel", () => { contextWindow: 272000, maxTokens: 128000, }; - vi.mocked(discoverModels).mockReturnValue({ find: vi.fn((provider: string, modelId: string) => { if (provider === "openai-codex" && modelId === "gpt-5.2-codex") { @@ -159,7 +67,6 @@ describe("resolveModel", () => { } as unknown as ReturnType); const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent"); - expect(result.error).toBeUndefined(); expect(result.model).toMatchObject({ provider: "openai-codex", @@ -167,146 +74,12 @@ describe("resolveModel", () => { api: "openai-codex-responses", baseUrl: "https://chatgpt.com/backend-api", reasoning: true, - contextWindow: 272000, - maxTokens: 128000, }); }); - it("builds an anthropic forward-compat fallback for claude-opus-4-6", () => { - const templateModel = { - id: "claude-opus-4-5", - name: "Claude Opus 4.5", - provider: "anthropic", - api: "anthropic-messages", - baseUrl: "https://api.anthropic.com", - reasoning: true, - input: ["text", "image"] as const, - cost: { input: 5, output: 25, cacheRead: 0.5, 
-      contextWindow: 200000,
-      maxTokens: 64000,
-    };
-
-    vi.mocked(discoverModels).mockReturnValue({
-      find: vi.fn((provider: string, modelId: string) => {
-        if (provider === "anthropic" && modelId === "claude-opus-4-5") {
-          return templateModel;
-        }
-        return null;
-      }),
-    } as unknown as ReturnType<typeof discoverModels>);
-
-    const result = resolveModel("anthropic", "claude-opus-4-6", "/tmp/agent");
-
-    expect(result.error).toBeUndefined();
-    expect(result.model).toMatchObject({
-      provider: "anthropic",
-      id: "claude-opus-4-6",
-      api: "anthropic-messages",
-      baseUrl: "https://api.anthropic.com",
-      reasoning: true,
-    });
-  });
-
-  it("builds a google-antigravity forward-compat fallback for claude-opus-4-6-thinking", () => {
-    const templateModel = {
-      id: "claude-opus-4-5-thinking",
-      name: "Claude Opus 4.5 Thinking",
-      provider: "google-antigravity",
-      api: "google-gemini-cli",
-      baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com",
-      reasoning: true,
-      input: ["text", "image"] as const,
-      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-      contextWindow: 1000000,
-      maxTokens: 64000,
-    };
-
-    vi.mocked(discoverModels).mockReturnValue({
-      find: vi.fn((provider: string, modelId: string) => {
-        if (provider === "google-antigravity" && modelId === "claude-opus-4-5-thinking") {
-          return templateModel;
-        }
-        return null;
-      }),
-    } as unknown as ReturnType<typeof discoverModels>);
-
-    const result = resolveModel("google-antigravity", "claude-opus-4-6-thinking", "/tmp/agent");
-
-    expect(result.error).toBeUndefined();
-    expect(result.model).toMatchObject({
-      provider: "google-antigravity",
-      id: "claude-opus-4-6-thinking",
-      api: "google-gemini-cli",
-      baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com",
-      reasoning: true,
-    });
-  });
-
-  it("builds a zai forward-compat fallback for glm-5", () => {
-    const templateModel = {
-      id: "glm-4.7",
-      name: "GLM-4.7",
-      provider: "zai",
-      api: "openai-completions",
-      baseUrl: "https://api.z.ai/api/paas/v4",
-      reasoning: true,
-      input: ["text"] as const,
-      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-      contextWindow: 200000,
-      maxTokens: 131072,
-    };
-
-    vi.mocked(discoverModels).mockReturnValue({
-      find: vi.fn((provider: string, modelId: string) => {
-        if (provider === "zai" && modelId === "glm-4.7") {
-          return templateModel;
-        }
-        return null;
-      }),
-    } as unknown as ReturnType<typeof discoverModels>);
-
-    const result = resolveModel("zai", "glm-5", "/tmp/agent");
-
-    expect(result.error).toBeUndefined();
-    expect(result.model).toMatchObject({
-      provider: "zai",
-      id: "glm-5",
-      api: "openai-completions",
-      baseUrl: "https://api.z.ai/api/paas/v4",
-      reasoning: true,
-    });
-  });
-
-  it("keeps unknown-model errors for non-gpt-5 openai-codex ids", () => {
+  it("keeps unknown-model errors for non-forward-compat IDs", () => {
     const result = resolveModel("openai-codex", "gpt-4.1-mini", "/tmp/agent");
     expect(result.model).toBeUndefined();
     expect(result.error).toBe("Unknown model: openai-codex/gpt-4.1-mini");
   });
-
-  it("uses codex fallback even when openai-codex provider is configured", () => {
-    // This test verifies the ordering: codex fallback must fire BEFORE the generic providerCfg fallback.
-    // If ordering is wrong, the generic fallback would use api: "openai-responses" (the default)
-    // instead of "openai-codex-responses".
-    const cfg: OpenClawConfig = {
-      models: {
-        providers: {
-          "openai-codex": {
-            baseUrl: "https://custom.example.com",
-            // No models array, or models without gpt-5.3-codex
-          },
-        },
-      },
-    } as OpenClawConfig;
-
-    vi.mocked(discoverModels).mockReturnValue({
-      find: vi.fn(() => null),
-    } as unknown as ReturnType<typeof discoverModels>);
-
-    const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent", cfg);
-
-    expect(result.error).toBeUndefined();
-    expect(result.model?.api).toBe("openai-codex-responses");
-    expect(result.model?.id).toBe("gpt-5.3-codex");
-    expect(result.model?.provider).toBe("openai-codex");
-  });
 });
diff --git a/src/agents/session-write-lock.ts b/src/agents/session-write-lock.ts
index 7335abaf0b..3fe09f98db 100644
--- a/src/agents/session-write-lock.ts
+++ b/src/agents/session-write-lock.ts
@@ -16,7 +16,25 @@ type HeldLock = {
 const HELD_LOCKS = new Map();
 const CLEANUP_SIGNALS = ["SIGINT", "SIGTERM", "SIGQUIT", "SIGABRT"] as const;
 type CleanupSignal = (typeof CLEANUP_SIGNALS)[number];
-const cleanupHandlers = new Map<CleanupSignal, () => void>();
+const CLEANUP_STATE_KEY = Symbol.for("openclaw.sessionWriteLockCleanupState");
+
+type CleanupState = {
+  registered: boolean;
+  cleanupHandlers: Map<CleanupSignal, () => void>;
+};
+
+function resolveCleanupState(): CleanupState {
+  const proc = process as NodeJS.Process & {
+    [CLEANUP_STATE_KEY]?: CleanupState;
+  };
+  if (!proc[CLEANUP_STATE_KEY]) {
+    proc[CLEANUP_STATE_KEY] = {
+      registered: false,
+      cleanupHandlers: new Map<CleanupSignal, () => void>(),
+    };
+  }
+  return proc[CLEANUP_STATE_KEY];
+}
 
 function isAlive(pid: number): boolean {
   if (!Number.isFinite(pid) || pid <= 0) {
@@ -52,13 +70,12 @@
   }
 }
 
-let cleanupRegistered = false;
-
 function handleTerminationSignal(signal: CleanupSignal): void {
   releaseAllLocksSync();
+  const cleanupState = resolveCleanupState();
   const shouldReraise = process.listenerCount(signal) === 1;
   if (shouldReraise) {
-    const handler = cleanupHandlers.get(signal);
+    const handler = cleanupState.cleanupHandlers.get(signal);
     if (handler) {
       process.off(signal, handler);
     }
@@ -71,10 +88,11 @@
 }
 
 function registerCleanupHandlers(): void {
-  if (cleanupRegistered) {
+  const cleanupState = resolveCleanupState();
+  if (cleanupState.registered) {
     return;
   }
-  cleanupRegistered = true;
+  cleanupState.registered = true;
 
   // Cleanup on normal exit and process.exit() calls
   process.on("exit", () => {
@@ -85,7 +103,7 @@
   for (const signal of CLEANUP_SIGNALS) {
     try {
       const handler = () => handleTerminationSignal(signal);
-      cleanupHandlers.set(signal, handler);
+      cleanupState.cleanupHandlers.set(signal, handler);
       process.on(signal, handler);
     } catch {
       // Ignore unsupported signals on this platform.
diff --git a/src/agents/transcript-policy.e2e.test.ts b/src/agents/transcript-policy.e2e.test.ts
index 48977ec98f..669f69384e 100644
--- a/src/agents/transcript-policy.e2e.test.ts
+++ b/src/agents/transcript-policy.e2e.test.ts
@@ -1,27 +1,19 @@
 import { describe, expect, it } from "vitest";
 
 import { resolveTranscriptPolicy } from "./transcript-policy.js";
 
-describe("resolveTranscriptPolicy", () => {
-  it("enables sanitizeToolCallIds for Anthropic provider", () => {
+describe("resolveTranscriptPolicy e2e smoke", () => {
+  it("uses strict tool-call sanitization for OpenAI models", () => {
     const policy = resolveTranscriptPolicy({
-      provider: "anthropic",
-      modelId: "claude-opus-4-5",
-      modelApi: "anthropic-messages",
+      provider: "openai",
+      modelId: "gpt-4o",
+      modelApi: "openai",
     });
+    expect(policy.sanitizeMode).toBe("images-only");
     expect(policy.sanitizeToolCallIds).toBe(true);
     expect(policy.toolCallIdMode).toBe("strict");
   });
 
-  it("enables sanitizeToolCallIds for Google provider", () => {
-    const policy = resolveTranscriptPolicy({
-      provider: "google",
-      modelId: "gemini-2.0-flash",
-      modelApi: "google-generative-ai",
-    });
-    expect(policy.sanitizeToolCallIds).toBe(true);
-  });
-
-  it("enables sanitizeToolCallIds for Mistral provider", () => {
+  it("uses strict9 tool-call sanitization for Mistral-family models", () => {
     const policy = resolveTranscriptPolicy({
       provider: "mistral",
       modelId: "mistral-large-latest",
@@ -29,13 +21,4 @@
     expect(policy.sanitizeToolCallIds).toBe(true);
     expect(policy.toolCallIdMode).toBe("strict9");
   });
-
-  it("disables sanitizeToolCallIds for OpenAI provider", () => {
-    const policy = resolveTranscriptPolicy({
-      provider: "openai",
-      modelId: "gpt-4o",
-      modelApi: "openai",
-    });
-    expect(policy.sanitizeToolCallIds).toBe(false);
-  });
 });
diff --git a/src/gateway/server.health.e2e.test.ts b/src/gateway/server.health.e2e.test.ts
index 797e3b646c..adab0dfd1a 100644
--- a/src/gateway/server.health.e2e.test.ts
+++ b/src/gateway/server.health.e2e.test.ts
@@ -221,8 +221,9 @@ describe("gateway server health/presence", () => {
   test("presence includes client fingerprint", async () => {
     const identityPath = path.join(os.tmpdir(), `openclaw-device-${randomUUID()}.json`);
     const identity = loadOrCreateDeviceIdentity(identityPath);
+    const token = process.env.OPENCLAW_GATEWAY_TOKEN?.trim() || undefined;
     const role = "operator";
-    const scopes: string[] = [];
+    const scopes: string[] = ["operator.admin"];
     const signedAtMs = Date.now();
     const payload = buildDeviceAuthPayload({
       deviceId: identity.deviceId,
@@ -231,11 +232,12 @@
       role,
       scopes,
       signedAtMs,
-      token: null,
+      token: token ?? null,
     });
     const ws = await openClient({
       role,
       scopes,
+      token,
       client: {
         id: GATEWAY_CLIENT_NAMES.FINGERPRINT,
         version: "9.9.9",
@@ -262,8 +264,14 @@
       }),
     );
 
-    const presenceRes = await presenceP;
-    const entries = presenceRes.payload as Array<Record<string, unknown>>;
+    const presenceRes = (await presenceP) as { ok?: boolean; payload?: unknown };
+    expect(presenceRes.ok).toBe(true);
+    const presencePayload = presenceRes.payload;
+    const entries = Array.isArray(presencePayload)
+      ? presencePayload
+      : Array.isArray((presencePayload as { presence?: unknown } | undefined)?.presence)
+        ? ((presencePayload as { presence: Array<Record<string, unknown>> }).presence ?? [])
+        : [];
     const clientEntry = entries.find(
       (e) => e.host === GATEWAY_CLIENT_NAMES.FINGERPRINT && e.version === "9.9.9",
     );
diff --git a/src/gateway/server.models-voicewake-misc.e2e.test.ts b/src/gateway/server.models-voicewake-misc.e2e.test.ts
index 27ae4237a5..e1d9644a78 100644
--- a/src/gateway/server.models-voicewake-misc.e2e.test.ts
+++ b/src/gateway/server.models-voicewake-misc.e2e.test.ts
@@ -403,7 +403,8 @@ describe("gateway server misc", () => {
     const plugins = updated.plugins as Record<string, unknown> | undefined;
     const entries = plugins?.entries as Record<string, unknown> | undefined;
     const discord = entries?.discord as Record<string, unknown> | undefined;
-    expect(discord?.enabled).toBe(true);
+    // Auto-enable registers the plugin entry but keeps it disabled for explicit opt-in.
+    expect(discord?.enabled).toBe(false);
     expect((updated.channels as Record<string, unknown> | undefined)?.discord).toMatchObject({
       token: "token-123",
     });
diff --git a/src/gateway/test-helpers.server.ts b/src/gateway/test-helpers.server.ts
index f274776486..f8871ae8b7 100644
--- a/src/gateway/test-helpers.server.ts
+++ b/src/gateway/test-helpers.server.ts
@@ -109,9 +109,13 @@ async function resetGatewayTestState(options: { uniqueConfigRoot: boolean }) {
     throw new Error("resetGatewayTestState called before temp home was initialized");
   }
   applyGatewaySkipEnv();
-  tempConfigRoot = options.uniqueConfigRoot
-    ? await fs.mkdtemp(path.join(tempHome, "openclaw-test-"))
-    : path.join(tempHome, ".openclaw-test");
+  if (options.uniqueConfigRoot) {
+    tempConfigRoot = await fs.mkdtemp(path.join(tempHome, "openclaw-test-"));
+  } else {
+    tempConfigRoot = path.join(tempHome, ".openclaw-test");
+    await fs.rm(tempConfigRoot, { recursive: true, force: true });
+    await fs.mkdir(tempConfigRoot, { recursive: true });
+  }
   setTestConfigRoot(tempConfigRoot);
   sessionStoreSaveDelayMs.value = 0;
   testTailnetIPv4.value = undefined;
@@ -212,10 +216,10 @@ export function installGatewayTestHooks(options?: { scope?: "test" | "suite" })
   if (scope === "suite") {
     beforeAll(async () => {
       await setupGatewayTestHome();
-      await resetGatewayTestState({ uniqueConfigRoot: true });
+      await resetGatewayTestState({ uniqueConfigRoot: false });
     });
     beforeEach(async () => {
-      await resetGatewayTestState({ uniqueConfigRoot: true });
+      await resetGatewayTestState({ uniqueConfigRoot: false });
     }, 60_000);
     afterEach(async () => {
       await cleanupGatewayTestHome({ restoreEnv: false });
diff --git a/src/hooks/gmail-ops.ts b/src/hooks/gmail-ops.ts
index b8fbd4aba1..e7fe4be262 100644
--- a/src/hooks/gmail-ops.ts
+++ b/src/hooks/gmail-ops.ts
@@ -330,11 +330,17 @@ export async function runGmailService(opts: GmailRunOptions) {
     void startGmailWatch(runtimeConfig);
   }, renewMs);
 
+  const detachSignals = () => {
+    process.off("SIGINT", shutdown);
+    process.off("SIGTERM", shutdown);
+  };
+
   const shutdown = () => {
     if (shuttingDown) {
       return;
     }
     shuttingDown = true;
+    detachSignals();
     clearInterval(renewTimer);
     child.kill("SIGTERM");
   };
@@ -344,6 +350,7 @@
   child.on("exit", () => {
     if (shuttingDown) {
+      detachSignals();
       return;
     }
     defaultRuntime.log("gog watch serve exited; restarting in 2s");