mirror of https://github.com/openclaw/openclaw.git
synced 2026-02-19 18:39:20 -05:00
fix: finalize vLLM onboarding integration (#12577) (thanks @gejifeng)

@@ -13,6 +13,7 @@ Docs: https://docs.openclaw.ai
 - Auto-reply/Threading: auto-inject implicit reply threading so `replyToMode` works without requiring model-emitted `[[reply_to_current]]`, while preserving `replyToMode: "off"` behavior for implicit Slack replies and keeping block-streaming chunk coalescing stable under `replyToMode: "first"`. (#14976) Thanks @Diaspar4u.
 - Sandbox: pass configured `sandbox.docker.env` variables to sandbox containers at `docker create` time. (#15138) Thanks @stevebot-alive.
 - Onboarding/CLI: restore terminal state without resuming paused `stdin`, so onboarding exits cleanly after choosing Web UI and the installer returns instead of appearing stuck.
+- Onboarding/Providers: add vLLM as an onboarding provider with model discovery, auth profile wiring, and non-interactive auth-choice validation. (#12577) Thanks @gejifeng.
 - macOS Voice Wake: fix a crash in trigger trimming for CJK/Unicode transcripts by matching and slicing on original-string ranges instead of transformed-string indices. (#11052) Thanks @Flash-LHR.
 - Heartbeat: prevent scheduler silent-death races during runner reloads, preserve retry cooldown backoff under wake bursts, and prioritize user/action wake causes over interval/retry reasons when coalescing. (#15108) Thanks @joeykrug.
 - Outbound targets: fail closed for WhatsApp/Twitch/Google Chat fallback paths so invalid or missing targets are dropped instead of rerouted, and align resolver hints with strict target requirements. (#13578) Thanks @mcaxtr.
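
The Sandbox entry above (#15138) comes down to when the variables reach the container: passed at `docker create` time, they exist for the container's whole lifetime instead of being injected per exec. A minimal sketch of that argv construction, with an illustrative helper name (the repository's actual code is not shown in this excerpt):

// Fold configured sandbox env vars into the `docker create` argv.
function buildDockerCreateArgs(image: string, env: Record<string, string>): string[] {
  const args = ["create"];
  for (const [key, value] of Object.entries(env)) {
    args.push("-e", `${key}=${value}`);
  }
  args.push(image);
  return args;
}

// buildDockerCreateArgs("sandbox:latest", { HTTP_PROXY: "http://proxy:3128" })
//   -> ["create", "-e", "HTTP_PROXY=http://proxy:3128", "sandbox:latest"]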
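
The Onboarding/CLI fix leans on a Node.js behavior: a flowing `stdin` is a live handle that keeps the event loop, and therefore the process, alive. Restoring the terminal without calling `stdin.resume()` lets the process exit and return control to the installer. A hedged sketch of that restore step, not the repository's exact code:

function restoreTerminal(): void {
  // Drop raw mode so the shell gets a sane terminal back.
  if (process.stdin.isTTY) {
    process.stdin.setRawMode(false);
  }
  // Deliberately no process.stdin.resume(): resuming re-arms a live handle
  // and the onboarding process would appear stuck instead of exiting.
}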
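
The macOS Voice Wake fix is an instance of a general Unicode pitfall: case transforms can change a string's length (for example "İ".toLowerCase() yields "i" plus a combining dot, two code units), so an index found in the transformed transcript is not a valid index into the original. One way to match case-insensitively while slicing only with original-string offsets, sketched in TypeScript rather than the shipped implementation:

// Trim a leading trigger phrase, comparing via toLowerCase() but slicing
// only at offsets that were derived from the original string itself.
function trimLeadingTrigger(original: string, trigger: string): string {
  const want = trigger.toLowerCase();
  for (let end = 1; end <= original.length; end++) {
    const got = original.slice(0, end).toLowerCase();
    if (got === want) return original.slice(end).trimStart();
    if (!want.startsWith(got)) break; // prefix diverged; no match possible
  }
  return original;
}

// trimLeadingTrigger("İİ hello", "İİ") === "hello", whereas slicing the
// original with an index taken from its lowercased form (which is two
// code units longer) would return "ello".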
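
For the Heartbeat entry, the cause prioritization is the easiest piece to picture: when a burst of wake requests coalesces into a single run, the surviving cause should reflect user intent rather than whichever timer happened to fire alongside it. A sketch with assumed cause names and priorities:

type WakeCause = "user" | "action" | "retry" | "interval";

const WAKE_PRIORITY: Record<WakeCause, number> = { user: 3, action: 2, retry: 1, interval: 0 };

// Keep the highest-priority cause when coalescing a burst of wakes.
function coalesceWakeCause(causes: WakeCause[]): WakeCause {
  let best: WakeCause = "interval";
  for (const cause of causes) {
    if (WAKE_PRIORITY[cause] > WAKE_PRIORITY[best]) best = cause;
  }
  return best;
}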
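
The Outbound targets entry describes a fail-closed policy: an invalid or missing target must produce a drop, never a reroute to some default destination. In sketch form, with assumed shapes:

type Delivery =
  | { kind: "send"; target: string }
  | { kind: "drop"; reason: "missing target" | "invalid target" };

// Fail closed: never substitute a fallback target for a bad one.
function resolveOutboundTarget(
  raw: string | undefined,
  isValidTarget: (target: string) => boolean,
): Delivery {
  if (!raw) return { kind: "drop", reason: "missing target" };
  if (!isValidTarget(raw)) return { kind: "drop", reason: "invalid target" };
  return { kind: "send", target: raw };
}
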
@@ -121,6 +121,12 @@ interface OllamaTagsResponse {
   models: OllamaModel[];
 }
 
+type VllmModelsResponse = {
+  data?: Array<{
+    id?: string;
+  }>;
+};
+
 /**
  * Derive the Ollama native API base URL from a configured base URL.
  *
@@ -139,11 +145,6 @@ export function resolveOllamaApiBase(configuredBaseUrl?: string): string {
 }
 
 async function discoverOllamaModels(baseUrl?: string): Promise<ModelDefinitionConfig[]> {
-  type VllmModelsResponse = {
-    data?: Array<{
-      id?: string;
-    }>;
-  };
   // Skip Ollama discovery in test environments
   if (process.env.VITEST || process.env.NODE_ENV === "test") {
     return [];
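
The two hunks above hoist `VllmModelsResponse` from function scope inside `discoverOllamaModels` up to module scope, where the vLLM path can share it. Since vLLM serves an OpenAI-compatible API, model discovery reduces to a GET against `/v1/models` and a map over `data[].id`. A minimal sketch using the type added above; the helper name and default URL are assumptions, not the repository's actual code:

async function discoverVllmModels(baseUrl = "http://localhost:8000/v1"): Promise<string[]> {
  const res = await fetch(`${baseUrl.replace(/\/+$/, "")}/models`);
  if (!res.ok) return [];
  const body = (await res.json()) as VllmModelsResponse;
  return (body.data ?? [])
    .map((entry) => entry.id)
    .filter((id): id is string => typeof id === "string");
}
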
@@ -470,14 +471,6 @@ function buildMoonshotProvider(): ProviderConfig {
   };
 }
 
-function buildTogetherProvider(): ProviderConfig {
-  return {
-    baseUrl: TOGETHER_BASE_URL,
-    api: "openai-completions",
-    models: TOGETHER_MODEL_CATALOG.map(buildTogetherModelDefinition),
-  };
-}
-
 function buildQwenPortalProvider(): ProviderConfig {
   return {
     baseUrl: QWEN_PORTAL_BASE_URL,
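
The builders above share one shape: a `baseUrl`, an `api` kind, and a `models` list. A vLLM builder from #12577 would plausibly follow the same pattern against vLLM's default OpenAI-compatible endpoint; this is a hypothetical sketch, since the actual builder is not part of this excerpt, and `ProviderConfig`/`ModelDefinitionConfig` are the surrounding file's types:

function buildVllmProvider(models: ModelDefinitionConfig[]): ProviderConfig {
  return {
    baseUrl: "http://localhost:8000/v1", // vLLM's default OpenAI-compatible serve address
    api: "openai-completions",
    models,
  };
}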