fix(provider): onboard azure custom endpoints via responses

This commit is contained in:
Ayaan Zaidi 2026-03-19 21:28:48 +05:30
parent 9d772d6eab
commit 7a57082466
3 changed files with 300 additions and 28 deletions

View File

@ -44,7 +44,6 @@ Docs: https://docs.openclaw.ai
- Control UI/chat: add an expand-to-canvas button on assistant chat bubbles and in-app session navigation from Sessions and Cron views. Thanks @BunsDev.
- Plugins/context engines: expose `delegateCompactionToRuntime(...)` on the public plugin SDK, refactor the legacy engine to use the shared helper, and clarify `ownsCompaction` delegation semantics for non-owning engines. (#49061) Thanks @jalehman.
- Plugins/MiniMax: add MiniMax-M2.7 and MiniMax-M2.7-highspeed models and update the default model from M2.5 to M2.7. (#49691) Thanks @liyuan97.
- Contracts/Matrix: validate Matrix session binding coverage through the real manager, expose the manager on the Matrix runtime API, and let tests pass an explicit state directory for isolated contract setup. (#50369) Thanks @ChroniCat.
### Fixes
@ -93,6 +92,7 @@ Docs: https://docs.openclaw.ai
- Z.AI/onboarding: add `glm-5-turbo` to the default Z.AI provider catalog so onboarding-generated configs expose the new model alongside the existing GLM defaults. (#46670) Thanks @tomsun28.
- Zalo Personal/group gating: stop reapplying `dmPolicy.allowFrom` as a sender gate for already-allowlisted groups when `groupAllowFrom` is unset, so any member of an allowed group can trigger replies while DMs stay restricted. (#46663) Fixes #40146. Thanks @Takhoffman.
- Zalo/plugin runtime: export `resolveClientIp` from `openclaw/plugin-sdk/zalo` so installed builds no longer crash on startup when the webhook monitor loads from the packaged extension instead of the monorepo source tree. (#46549) Thanks @No898.
- Onboarding/custom providers: store Azure OpenAI and Azure AI Foundry custom endpoints with the Responses API config shape, normalized `/openai/v1` base URLs, and Azure-safe defaults so TUI and agent runs work after setup. (#49543) Thanks @kunalk16.
- Docker/live tests: mount external CLI auth homes into writable container copies, derive Codex OAuth expiry from JWT `exp`, refresh synced CLI creds instead of trusting stale cached expiry, and make gateway live probes wait on transcript output so `pnpm test:docker:all` stays green in Linux.
- Plugins/install precedence: keep bundled plugins ahead of auto-discovered globals by default, but let an explicitly installed plugin record win its own duplicate-id tie so installed channel plugins load from `~/.openclaw/extensions` after `openclaw plugins install`. (#46722) Thanks @Takhoffman.
- Control UI/logging: make browser-safe logger imports avoid eager temp-dir resolution so the bundled Control UI no longer crashes to a blank screen when logging reaches `tmp-openclaw-dir`. (#48469) Fixes #48062. Thanks @7inspire.

View File

@ -188,7 +188,7 @@ describe("promptCustomApiConfig", () => {
expect(JSON.parse(firstCall?.body ?? "{}")).toMatchObject({ max_tokens: 1 });
});
it("uses azure-specific headers and body for openai verification probes", async () => {
it("uses azure responses-specific headers and body for openai verification probes", async () => {
const prompter = createTestPrompter({
text: [
"https://my-resource.openai.azure.com",
@ -213,18 +213,16 @@ describe("promptCustomApiConfig", () => {
}
const parsedBody = JSON.parse(firstInit?.body ?? "{}");
expect(firstUrl).toContain("/openai/deployments/gpt-4.1/chat/completions");
expect(firstUrl).toContain("api-version=2024-10-21");
expect(firstUrl).toBe("https://my-resource.openai.azure.com/openai/v1/responses");
expect(firstInit?.headers?.["api-key"]).toBe("azure-test-key");
expect(firstInit?.headers?.Authorization).toBeUndefined();
expect(firstInit?.body).toBeDefined();
expect(parsedBody).toMatchObject({
messages: [{ role: "user", content: "Hi" }],
max_completion_tokens: 5,
expect(parsedBody).toEqual({
model: "gpt-4.1",
input: "Hi",
max_output_tokens: 1,
stream: false,
});
expect(parsedBody).not.toHaveProperty("model");
expect(parsedBody).not.toHaveProperty("max_tokens");
});
it("uses expanded max_tokens for anthropic verification probes", async () => {
@ -432,6 +430,192 @@ describe("applyCustomApiConfig", () => {
])("rejects $name", ({ params, expectedMessage }) => {
expect(() => applyCustomApiConfig(params)).toThrow(expectedMessage);
});
it("produces azure-specific config for Azure OpenAI URLs with reasoning model", () => {
const result = applyCustomApiConfig({
config: {},
baseUrl: "https://user123-resource.openai.azure.com",
modelId: "o4-mini",
compatibility: "openai",
apiKey: "abcd1234",
});
const providerId = result.providerId!;
const provider = result.config.models?.providers?.[providerId];
expect(provider?.baseUrl).toBe("https://user123-resource.openai.azure.com/openai/v1");
expect(provider?.api).toBe("openai-responses");
expect(provider?.authHeader).toBe(false);
expect(provider?.headers).toEqual({ "api-key": "abcd1234" });
const model = provider?.models?.find((m) => m.id === "o4-mini");
expect(model?.input).toEqual(["text", "image"]);
expect(model?.reasoning).toBe(true);
expect(model?.compat).toEqual({ supportsStore: false });
const modelRef = `${providerId}/${result.modelId}`;
expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBe("medium");
});
it("produces azure-specific config for Azure AI Foundry URLs", () => {
const result = applyCustomApiConfig({
config: {},
baseUrl: "https://my-resource.services.ai.azure.com",
modelId: "gpt-4.1",
compatibility: "openai",
apiKey: "key123",
});
const providerId = result.providerId!;
const provider = result.config.models?.providers?.[providerId];
expect(provider?.baseUrl).toBe("https://my-resource.services.ai.azure.com/openai/v1");
expect(provider?.api).toBe("openai-responses");
expect(provider?.authHeader).toBe(false);
expect(provider?.headers).toEqual({ "api-key": "key123" });
const model = provider?.models?.find((m) => m.id === "gpt-4.1");
expect(model?.reasoning).toBe(false);
expect(model?.input).toEqual(["text"]);
expect(model?.compat).toEqual({ supportsStore: false });
const modelRef = `${providerId}/gpt-4.1`;
expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBeUndefined();
});
it("strips pre-existing deployment path from Azure URL in stored config", () => {
const result = applyCustomApiConfig({
config: {},
baseUrl: "https://my-resource.openai.azure.com/openai/deployments/gpt-4",
modelId: "gpt-4",
compatibility: "openai",
apiKey: "key456",
});
const providerId = result.providerId!;
const provider = result.config.models?.providers?.[providerId];
expect(provider?.baseUrl).toBe("https://my-resource.openai.azure.com/openai/v1");
});
it("re-onboard updates existing Azure provider instead of creating a duplicate", () => {
const oldProviderId = "custom-my-resource-openai-azure-com";
const result = applyCustomApiConfig({
config: {
models: {
providers: {
[oldProviderId]: {
baseUrl: "https://my-resource.openai.azure.com/openai/deployments/gpt-4",
api: "openai-completions",
models: [
{
id: "gpt-4",
name: "gpt-4",
contextWindow: 1,
maxTokens: 1,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
reasoning: false,
},
],
},
},
},
},
baseUrl: "https://my-resource.openai.azure.com",
modelId: "gpt-4",
compatibility: "openai",
apiKey: "key789",
});
expect(result.providerId).toBe(oldProviderId);
expect(result.providerIdRenamedFrom).toBeUndefined();
const provider = result.config.models?.providers?.[oldProviderId];
expect(provider?.baseUrl).toBe("https://my-resource.openai.azure.com/openai/v1");
expect(provider?.api).toBe("openai-responses");
expect(provider?.authHeader).toBe(false);
expect(provider?.headers).toEqual({ "api-key": "key789" });
});
it("does not add azure fields for non-azure URLs", () => {
const result = applyCustomApiConfig({
config: {},
baseUrl: "https://llm.example.com/v1",
modelId: "foo-large",
compatibility: "openai",
apiKey: "key123",
providerId: "custom",
});
const provider = result.config.models?.providers?.custom;
expect(provider?.api).toBe("openai-completions");
expect(provider?.authHeader).toBeUndefined();
expect(provider?.headers).toBeUndefined();
expect(provider?.models?.[0]?.reasoning).toBe(false);
expect(provider?.models?.[0]?.input).toEqual(["text"]);
expect(provider?.models?.[0]?.compat).toBeUndefined();
expect(
result.config.agents?.defaults?.models?.["custom/foo-large"]?.params?.thinking,
).toBeUndefined();
});
it("re-onboard preserves user-customized fields for non-azure models", () => {
const result = applyCustomApiConfig({
config: {
models: {
providers: {
custom: {
baseUrl: "https://llm.example.com/v1",
api: "openai-completions",
models: [
{
id: "foo-large",
name: "My Custom Model",
reasoning: true,
input: ["text", "image"],
cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
contextWindow: 131072,
maxTokens: 16384,
},
],
},
},
},
} as OpenClawConfig,
baseUrl: "https://llm.example.com/v1",
modelId: "foo-large",
compatibility: "openai",
apiKey: "key",
providerId: "custom",
});
const model = result.config.models?.providers?.custom?.models?.find(
(m) => m.id === "foo-large",
);
expect(model?.name).toBe("My Custom Model");
expect(model?.reasoning).toBe(true);
expect(model?.input).toEqual(["text", "image"]);
expect(model?.cost).toEqual({ input: 1, output: 2, cacheRead: 0, cacheWrite: 0 });
expect(model?.maxTokens).toBe(16384);
expect(model?.contextWindow).toBe(131072);
});
it("preserves existing per-model thinking when already set for azure reasoning model", () => {
const providerId = "custom-my-resource-openai-azure-com";
const modelRef = `${providerId}/o3-mini`;
const result = applyCustomApiConfig({
config: {
agents: {
defaults: {
models: {
[modelRef]: { params: { thinking: "high" } },
},
},
},
} as OpenClawConfig,
baseUrl: "https://my-resource.openai.azure.com",
modelId: "o3-mini",
compatibility: "openai",
apiKey: "key",
});
expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBe("high");
});
});
describe("parseNonInteractiveCustomApiFlags", () => {

View File

@ -19,6 +19,9 @@ import type { SecretInputMode } from "./onboard-types.js";
const DEFAULT_CONTEXT_WINDOW = CONTEXT_WINDOW_HARD_MIN_TOKENS;
const DEFAULT_MAX_TOKENS = 4096;
// Azure OpenAI uses the Responses API which supports larger defaults
const AZURE_DEFAULT_CONTEXT_WINDOW = 400_000;
const AZURE_DEFAULT_MAX_TOKENS = 16_384;
const VERIFY_TIMEOUT_MS = 30_000;
function normalizeContextWindowForCustomModel(value: unknown): number {
@ -61,6 +64,32 @@ function transformAzureUrl(baseUrl: string, modelId: string): string {
return `${normalizedUrl}/openai/deployments/${modelId}`;
}
/**
 * Transforms an Azure URL into the base URL stored in config.
 *
 * Examples:
 *   https://my-resource.openai.azure.com
 *     => https://my-resource.openai.azure.com/openai/v1
 *   https://my-resource.openai.azure.com/openai/deployments/gpt-4
 *     => https://my-resource.openai.azure.com/openai/v1
 */
function transformAzureConfigUrl(baseUrl: string): string {
  // Strip ALL trailing slashes, not just one — "https://host//" previously
  // produced a malformed "https://host//openai/v1" base URL.
  const normalizedUrl = baseUrl.replace(/\/+$/, "");
  // Already in the Responses API shape — keep as-is.
  if (normalizedUrl.endsWith("/openai/v1")) {
    return normalizedUrl;
  }
  // Strip a full deployment path back to the base origin
  const deploymentIdx = normalizedUrl.indexOf("/openai/deployments/");
  const base = deploymentIdx !== -1 ? normalizedUrl.slice(0, deploymentIdx) : normalizedUrl;
  return `${base}/openai/v1`;
}
/**
 * Reports whether two URL strings share the same hostname, compared
 * case-insensitively. Returns false when either string fails to parse.
 */
function hasSameHost(a: string, b: string): boolean {
  let hostA: string;
  let hostB: string;
  try {
    hostA = new URL(a).hostname;
    hostB = new URL(b).hostname;
  } catch {
    // Malformed URL on either side => never considered the same host.
    return false;
  }
  return hostA.toLowerCase() === hostB.toLowerCase();
}
export type CustomApiCompatibility = "openai" | "anthropic";
type CustomApiCompatibilityChoice = CustomApiCompatibility | "unknown";
export type CustomApiResult = {
@ -174,7 +203,11 @@ function resolveUniqueEndpointId(params: {
}) {
const normalized = normalizeEndpointId(params.requestedId) || "custom";
const existing = params.providers[normalized];
if (!existing?.baseUrl || existing.baseUrl === params.baseUrl) {
if (
!existing?.baseUrl ||
existing.baseUrl === params.baseUrl ||
(isAzureUrl(params.baseUrl) && hasSameHost(existing.baseUrl, params.baseUrl))
) {
return { providerId: normalized, renamed: false };
}
let suffix = 2;
@ -320,26 +353,31 @@ async function requestOpenAiVerification(params: {
apiKey: string;
modelId: string;
}): Promise<VerificationResult> {
const endpoint = resolveVerificationEndpoint({
baseUrl: params.baseUrl,
modelId: params.modelId,
endpointPath: "chat/completions",
});
const isBaseUrlAzureUrl = isAzureUrl(params.baseUrl);
const headers = isBaseUrlAzureUrl
? buildAzureOpenAiHeaders(params.apiKey)
: buildOpenAiHeaders(params.apiKey);
if (isBaseUrlAzureUrl) {
const endpoint = new URL(
"responses",
transformAzureConfigUrl(params.baseUrl).replace(/\/?$/, "/"),
).href;
return await requestVerification({
endpoint,
headers,
body: {
messages: [{ role: "user", content: "Hi" }],
max_completion_tokens: 5,
model: params.modelId,
input: "Hi",
max_output_tokens: 1,
stream: false,
},
});
} else {
const endpoint = resolveVerificationEndpoint({
baseUrl: params.baseUrl,
modelId: params.modelId,
endpointPath: "chat/completions",
});
return await requestVerification({
endpoint,
headers,
@ -572,8 +610,9 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
throw new CustomApiError("invalid_model_id", "Custom provider model ID is required.");
}
const isAzure = isAzureUrl(baseUrl);
// Transform Azure URLs to include the deployment path for API calls
const resolvedBaseUrl = isAzureUrl(baseUrl) ? transformAzureUrl(baseUrl, modelId) : baseUrl;
const resolvedBaseUrl = isAzure ? transformAzureConfigUrl(baseUrl) : baseUrl;
const providerIdResult = resolveCustomProviderId({
config: params.config,
@ -597,21 +636,39 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
const existingProvider = providers[providerId];
const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : [];
const hasModel = existingModels.some((model) => model.id === modelId);
const nextModel = {
id: modelId,
name: `${modelId} (Custom Provider)`,
contextWindow: DEFAULT_CONTEXT_WINDOW,
maxTokens: DEFAULT_MAX_TOKENS,
input: ["text"] as ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
reasoning: false,
};
const isLikelyReasoningModel = isAzure && /\b(o[134]|gpt-([5-9]|\d{2,}))\b/i.test(modelId);
const nextModel = isAzure
? {
id: modelId,
name: `${modelId} (Custom Provider)`,
contextWindow: AZURE_DEFAULT_CONTEXT_WINDOW,
maxTokens: AZURE_DEFAULT_MAX_TOKENS,
input: isLikelyReasoningModel
? (["text", "image"] as Array<"text" | "image">)
: (["text"] as ["text"]),
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
reasoning: isLikelyReasoningModel,
compat: { supportsStore: false },
}
: {
id: modelId,
name: `${modelId} (Custom Provider)`,
contextWindow: DEFAULT_CONTEXT_WINDOW,
maxTokens: DEFAULT_MAX_TOKENS,
input: ["text"] as ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
reasoning: false,
};
const mergedModels = hasModel
? existingModels.map((model) =>
model.id === modelId
? {
...model,
...(isAzure ? nextModel : {}),
name: model.name ?? nextModel.name,
cost: model.cost ?? nextModel.cost,
contextWindow: normalizeContextWindowForCustomModel(model.contextWindow),
maxTokens: model.maxTokens ?? nextModel.maxTokens,
}
: model,
)
@ -621,6 +678,11 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
normalizeOptionalProviderApiKey(params.apiKey) ??
normalizeOptionalProviderApiKey(existingApiKey);
const providerApi = isAzure
? ("openai-responses" as const)
: resolveProviderApi(params.compatibility);
const azureHeaders = isAzure && normalizedApiKey ? { "api-key": normalizedApiKey } : undefined;
let config: OpenClawConfig = {
...params.config,
models: {
@ -631,8 +693,10 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
[providerId]: {
...existingProviderRest,
baseUrl: resolvedBaseUrl,
api: resolveProviderApi(params.compatibility),
api: providerApi,
...(normalizedApiKey ? { apiKey: normalizedApiKey } : {}),
...(isAzure ? { authHeader: false } : {}),
...(azureHeaders ? { headers: azureHeaders } : {}),
models: mergedModels.length > 0 ? mergedModels : [nextModel],
},
},
@ -640,6 +704,30 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
};
config = applyPrimaryModel(config, modelRef);
if (isAzure && isLikelyReasoningModel) {
const existingPerModelThinking = config.agents?.defaults?.models?.[modelRef]?.params?.thinking;
if (!existingPerModelThinking) {
config = {
...config,
agents: {
...config.agents,
defaults: {
...config.agents?.defaults,
models: {
...config.agents?.defaults?.models,
[modelRef]: {
...config.agents?.defaults?.models?.[modelRef],
params: {
...config.agents?.defaults?.models?.[modelRef]?.params,
thinking: "medium",
},
},
},
},
},
};
}
}
if (alias) {
config = {
...config,