From 91104ac74057bc75ce58dfb55ff01e877ec73a0a Mon Sep 17 00:00:00 2001 From: Ayaan Zaidi Date: Thu, 19 Mar 2026 22:04:33 +0530 Subject: [PATCH] fix(onboard): respect services.ai custom provider compatibility --- CHANGELOG.md | 1 + src/commands/onboard-custom.test.ts | 42 +++++++++++++++++++++++++++-- src/commands/onboard-custom.ts | 30 +++++++++++++-------- 3 files changed, 60 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12cd1cb3095..b2c66c05ac5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -163,6 +163,7 @@ Docs: https://docs.openclaw.ai - Channels: stabilize lane harness and monitor tests (#50167) Thanks @joshavant. - WhatsApp/active-listener: pin the active listener registry to a `globalThis` singleton so split WhatsApp bundle chunks share one listener map and outbound sends stop missing the registered session. (#47433) Thanks @clawdia67. - Plugins/WhatsApp: share split-load singleton state for plugin command registration and active WhatsApp listeners so duplicate module graphs no longer lose native plugin commands or outbound listener state. (#50418) Thanks @huntharo. +- Onboarding/custom providers: keep Azure AI Foundry `*.services.ai.azure.com` custom endpoints on the selected compatibility path instead of forcing Responses, so chat-completions Foundry models still work after setup. Fixes #50528. 
### Breaking diff --git a/src/commands/onboard-custom.test.ts b/src/commands/onboard-custom.test.ts index a8a6adc52f6..7917d45ca8f 100644 --- a/src/commands/onboard-custom.test.ts +++ b/src/commands/onboard-custom.test.ts @@ -225,6 +225,44 @@ describe("promptCustomApiConfig", () => { }); }); + it("uses Azure Foundry chat-completions probes for services.ai URLs", async () => { + const prompter = createTestPrompter({ + text: [ + "https://my-resource.services.ai.azure.com", + "azure-test-key", + "deepseek-v3-0324", + "custom", + "alias", + ], + select: ["plaintext", "openai"], + }); + const fetchMock = stubFetchSequence([{ ok: true }]); + + await runPromptCustomApi(prompter); + + const firstCall = fetchMock.mock.calls[0]; + const firstUrl = firstCall?.[0]; + const firstInit = firstCall?.[1] as + | { body?: string; headers?: Record<string, string> } + | undefined; + if (typeof firstUrl !== "string") { + throw new Error("Expected first verification call URL"); + } + const parsedBody = JSON.parse(firstInit?.body ?? 
"{}"); + + expect(firstUrl).toBe( + "https://my-resource.services.ai.azure.com/openai/deployments/deepseek-v3-0324/chat/completions?api-version=2024-10-21", + ); + expect(firstInit?.headers?.["api-key"]).toBe("azure-test-key"); + expect(firstInit?.headers?.Authorization).toBeUndefined(); + expect(parsedBody).toEqual({ + model: "deepseek-v3-0324", + messages: [{ role: "user", content: "Hi" }], + max_tokens: 1, + stream: false, + }); + }); + it("uses expanded max_tokens for anthropic verification probes", async () => { const prompter = createTestPrompter({ text: ["https://example.com", "test-key", "detected-model", "custom", "alias"], @@ -456,7 +494,7 @@ describe("applyCustomApiConfig", () => { expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBe("medium"); }); - it("produces azure-specific config for Azure AI Foundry URLs", () => { + it("keeps selected compatibility for Azure AI Foundry URLs", () => { const result = applyCustomApiConfig({ config: {}, baseUrl: "https://my-resource.services.ai.azure.com", @@ -468,7 +506,7 @@ describe("applyCustomApiConfig", () => { const provider = result.config.models?.providers?.[providerId]; expect(provider?.baseUrl).toBe("https://my-resource.services.ai.azure.com/openai/v1"); - expect(provider?.api).toBe("openai-responses"); + expect(provider?.api).toBe("openai-completions"); expect(provider?.authHeader).toBe(false); expect(provider?.headers).toEqual({ "api-key": "key123" }); diff --git a/src/commands/onboard-custom.ts b/src/commands/onboard-custom.ts index a24a113cbb7..5afab742448 100644 --- a/src/commands/onboard-custom.ts +++ b/src/commands/onboard-custom.ts @@ -29,22 +29,30 @@ function normalizeContextWindowForCustomModel(value: unknown): number { return parsed >= CONTEXT_WINDOW_HARD_MIN_TOKENS ? parsed : CONTEXT_WINDOW_HARD_MIN_TOKENS; } -/** - * Detects if a URL is from Azure AI Foundry or Azure OpenAI. 
- * Matches both: - * - https://*.services.ai.azure.com (Azure AI Foundry) - * - https://*.openai.azure.com (classic Azure OpenAI) - */ -function isAzureUrl(baseUrl: string): boolean { +function isAzureFoundryUrl(baseUrl: string): boolean { try { const url = new URL(baseUrl); const host = url.hostname.toLowerCase(); - return host.endsWith(".services.ai.azure.com") || host.endsWith(".openai.azure.com"); + return host.endsWith(".services.ai.azure.com"); } catch { return false; } } +function isAzureOpenAiUrl(baseUrl: string): boolean { + try { + const url = new URL(baseUrl); + const host = url.hostname.toLowerCase(); + return host.endsWith(".openai.azure.com"); + } catch { + return false; + } +} + +function isAzureUrl(baseUrl: string): boolean { + return isAzureFoundryUrl(baseUrl) || isAzureOpenAiUrl(baseUrl); +} + /** * Transforms an Azure AI Foundry/OpenAI URL to include the deployment path. * Azure requires: https://host/openai/deployments//chat/completions?api-version=2024-xx-xx-preview @@ -357,7 +365,7 @@ async function requestOpenAiVerification(params: { const headers = isBaseUrlAzureUrl ? buildAzureOpenAiHeaders(params.apiKey) : buildOpenAiHeaders(params.apiKey); - if (isBaseUrlAzureUrl) { + if (isAzureOpenAiUrl(params.baseUrl)) { const endpoint = new URL( "responses", transformAzureConfigUrl(params.baseUrl).replace(/\/?$/, "/"), @@ -611,7 +619,7 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom } const isAzure = isAzureUrl(baseUrl); - // Transform Azure URLs to include the deployment path for API calls + const isAzureOpenAi = isAzureOpenAiUrl(baseUrl); const resolvedBaseUrl = isAzure ? transformAzureConfigUrl(baseUrl) : baseUrl; const providerIdResult = resolveCustomProviderId({ @@ -678,7 +686,7 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom normalizeOptionalProviderApiKey(params.apiKey) ?? 
normalizeOptionalProviderApiKey(existingApiKey); - const providerApi = isAzure + const providerApi = isAzureOpenAi ? ("openai-responses" as const) : resolveProviderApi(params.compatibility); const azureHeaders = isAzure && normalizedApiKey ? { "api-key": normalizedApiKey } : undefined;