From a39def207969949864e8ca287920d31ba514720f Mon Sep 17 00:00:00 2001
From: OpenClaw
Date: Fri, 20 Mar 2026 08:15:42 +0700
Subject: [PATCH] fix: use openai-responses API for self-hosted providers

Local OpenAI-compatible providers (vLLM, Ollama, etc.) expose the modern
/v1/chat/completions endpoint rather than the legacy /v1/completions
endpoint. After the refactoring in commit 3b79494 set the api type to
"openai-completions", users reported 404 errors from providers such as
vLLM.

NOTE(review): verify the api-type-to-endpoint mapping before merging.
In OpenAI's API, the Responses API is served at /v1/responses — not
/v1/chat/completions — and most self-hosted OpenAI-compatible servers
(vLLM, Ollama) do not implement /v1/responses. If "openai-completions"
in this codebase maps to the Chat Completions API
(/v1/chat/completions), as the name does in comparable provider
configs, then switching to "openai-responses" would introduce 404s
rather than fix them. Confirm which endpoint each api value targets in
the provider client, and reproduce the reported failure against a live
vLLM instance, before accepting this change.

Fixes openclaw/openclaw#50719
---
 src/plugins/provider-self-hosted-setup.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/plugins/provider-self-hosted-setup.ts b/src/plugins/provider-self-hosted-setup.ts
index db7223ed987..a24a8fd7263 100644
--- a/src/plugins/provider-self-hosted-setup.ts
+++ b/src/plugins/provider-self-hosted-setup.ts
@@ -67,7 +67,7 @@ function buildOpenAICompatibleSelfHostedProviderConfig(params: {
     ...params.cfg.models?.providers,
     [params.providerId]: {
       baseUrl: params.baseUrl,
-      api: "openai-completions",
+      api: "openai-responses",
       apiKey: params.providerApiKey,
       models: [
         {