fix(onboard): restore openai-responses API for all Azure URLs
Regression introduced in 91104ac740 broke local vLLM endpoints that rely on openai-responses API compatibility. That commit narrowed Azure detection from 'isAzure' to 'isAzureOpenAi' (*.openai.azure.com only), causing non-Azure OpenAI-compatible endpoints to route to /v1/chat/completions instead of /responses. This resulted in 404 errors for local vLLM setups configured with openai-responses mode.

Fix: revert the providerApi logic to use 'isAzure' (covering both *.services.ai.azure.com and *.openai.azure.com) to preserve backward compatibility.

Fixes #50719
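For context, a minimal sketch of the two detection predicates the message contrasts. The names isAzure and isAzureOpenAi come from the diff below; the host-matching bodies are assumptions for illustration, not code from this repository:

// Sketch only: assumed implementations of the predicates named in the diff.
// Broad check, restored by this fix: matches both Azure AI Foundry
// (*.services.ai.azure.com) and classic Azure OpenAI (*.openai.azure.com) hosts.
function isAzureUrl(baseUrl: string): boolean {
  try {
    const host = new URL(baseUrl).hostname;
    return host.endsWith(".services.ai.azure.com") || host.endsWith(".openai.azure.com");
  } catch {
    return false;
  }
}

// Narrow check introduced by 91104ac740: classic Azure OpenAI hosts only.
// Keying the providerApi decision on this alone dropped endpoints that still
// need the openai-responses API, routing them to /v1/chat/completions (404).
function isAzureOpenAiUrl(baseUrl: string): boolean {
  try {
    return new URL(baseUrl).hostname.endsWith(".openai.azure.com");
  } catch {
    return false;
  }
}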
parent 55e12bd236
commit ba811b4629
@@ -506,7 +506,7 @@ describe("applyCustomApiConfig", () => {
 		const provider = result.config.models?.providers?.[providerId];
 
 		expect(provider?.baseUrl).toBe("https://my-resource.services.ai.azure.com/openai/v1");
-		expect(provider?.api).toBe("openai-completions");
+		expect(provider?.api).toBe("openai-responses");
 		expect(provider?.authHeader).toBe(false);
 		expect(provider?.headers).toEqual({ "api-key": "key123" });
 
@@ -686,7 +686,7 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
 		normalizeOptionalProviderApiKey(params.apiKey) ??
 		normalizeOptionalProviderApiKey(existingApiKey);
 
-	const providerApi = isAzureOpenAi
+	const providerApi = isAzure
 		? ("openai-responses" as const)
 		: resolveProviderApi(params.compatibility);
 	const azureHeaders = isAzure && normalizedApiKey ? { "api-key": normalizedApiKey } : undefined;
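Read together with the hunks above, a small sketch of how the restored ternary behaves for representative base URLs. pickProviderApi is a hypothetical wrapper around the ternary from the hunk, and isAzureUrl is the assumed broad check sketched earlier; neither is real repository code:

type ProviderApi = "openai-responses" | "openai-completions";

// compatibility stands in for resolveProviderApi(params.compatibility),
// which this sketch does not reproduce.
function pickProviderApi(baseUrl: string, compatibility: ProviderApi): ProviderApi {
  return isAzureUrl(baseUrl)
    ? ("openai-responses" as const)
    : compatibility;
}

// Azure AI Foundry resource: forced back to openai-responses by this fix.
pickProviderApi("https://my-resource.services.ai.azure.com/openai/v1", "openai-completions");
// Local vLLM endpoint in openai-responses mode: falls through to the
// compatibility setting, so it keeps hitting /responses as before.
pickProviderApi("http://localhost:8000/v1", "openai-responses");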