From 3bcde8df328271b4320602f314631d8769efe905 Mon Sep 17 00:00:00 2001
From: Peter Steinberger
Date: Fri, 13 Feb 2026 15:47:30 +0100
Subject: [PATCH] fix: finalize vLLM onboarding integration (#12577) (thanks
 @gejifeng)

---
 CHANGELOG.md                          |  1 +
 src/agents/models-config.providers.ts | 19 ++++++-------------
 2 files changed, 7 insertions(+), 13 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b6cf77d85e..829a5d85ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ Docs: https://docs.openclaw.ai
 - Auto-reply/Threading: auto-inject implicit reply threading so `replyToMode` works without requiring model-emitted `[[reply_to_current]]`, while preserving `replyToMode: "off"` behavior for implicit Slack replies and keeping block-streaming chunk coalescing stable under `replyToMode: "first"`. (#14976) Thanks @Diaspar4u.
 - Sandbox: pass configured `sandbox.docker.env` variables to sandbox containers at `docker create` time. (#15138) Thanks @stevebot-alive.
 - Onboarding/CLI: restore terminal state without resuming paused `stdin`, so onboarding exits cleanly after choosing Web UI and the installer returns instead of appearing stuck.
+- Onboarding/Providers: add vLLM as an onboarding provider with model discovery, auth profile wiring, and non-interactive auth-choice validation. (#12577) Thanks @gejifeng.
 - macOS Voice Wake: fix a crash in trigger trimming for CJK/Unicode transcripts by matching and slicing on original-string ranges instead of transformed-string indices. (#11052) Thanks @Flash-LHR.
 - Heartbeat: prevent scheduler silent-death races during runner reloads, preserve retry cooldown backoff under wake bursts, and prioritize user/action wake causes over interval/retry reasons when coalescing. (#15108) Thanks @joeykrug.
 - Outbound targets: fail closed for WhatsApp/Twitch/Google Chat fallback paths so invalid or missing targets are dropped instead of rerouted, and align resolver hints with strict target requirements. (#13578) Thanks @mcaxtr.
diff --git a/src/agents/models-config.providers.ts b/src/agents/models-config.providers.ts
index b07e67a0d3..9ad157ed2c 100644
--- a/src/agents/models-config.providers.ts
+++ b/src/agents/models-config.providers.ts
@@ -121,6 +121,12 @@ interface OllamaTagsResponse {
   models: OllamaModel[];
 }
 
+type VllmModelsResponse = {
+  data?: Array<{
+    id?: string;
+  }>;
+};
+
 /**
  * Derive the Ollama native API base URL from a configured base URL.
  *
@@ -139,11 +145,6 @@ export function resolveOllamaApiBase(configuredBaseUrl?: string): string {
 }
 async function discoverOllamaModels(baseUrl?: string): Promise {
-type VllmModelsResponse = {
-  data?: Array<{
-    id?: string;
-  }>;
-};
   // Skip Ollama discovery in test environments
   if (process.env.VITEST || process.env.NODE_ENV === "test") {
     return [];
   }
@@ -470,14 +471,6 @@ function buildMoonshotProvider(): ProviderConfig {
   };
 }
 
-function buildTogetherProvider(): ProviderConfig {
-  return {
-    baseUrl: TOGETHER_BASE_URL,
-    api: "openai-completions",
-    models: TOGETHER_MODEL_CATALOG.map(buildTogetherModelDefinition),
-  };
-}
-
 function buildQwenPortalProvider(): ProviderConfig {
   return {
     baseUrl: QWEN_PORTAL_BASE_URL,
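
---

Note (not part of the patch): the hunks above move the VllmModelsResponse
type out of discoverOllamaModels() to module scope so the vLLM discovery
path can share it. Below is a minimal sketch of how that type might be
consumed when querying a vLLM server's OpenAI-compatible model list at
GET {baseUrl}/models; the function name discoverVllmModels and the
http://localhost:8000/v1 default base URL are illustrative assumptions,
not part of this change.

    type VllmModelsResponse = {
      data?: Array<{
        id?: string;
      }>;
    };

    // Hypothetical discovery helper: list model ids served by a vLLM
    // instance, returning an empty list on any failure.
    async function discoverVllmModels(
      baseUrl = "http://localhost:8000/v1",
    ): Promise<string[]> {
      try {
        // vLLM exposes an OpenAI-compatible model list at GET {baseUrl}/models.
        const res = await fetch(`${baseUrl}/models`);
        if (!res.ok) {
          return [];
        }
        const body = (await res.json()) as VllmModelsResponse;
        // Keep only entries that actually carry a string model id.
        return (body.data ?? [])
          .map((entry) => entry.id)
          .filter((id): id is string => typeof id === "string");
      } catch {
        // Server unreachable: surface no models rather than failing onboarding.
        return [];
      }
    }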