feat(openai-codex): add gpt-5.4-mini support for Codex OAuth provider
Add forward-compat resolution, catalog augmentation, xhigh thinking, and modern model filtering for gpt-5.4-mini in the openai-codex provider.

GPT-5.4 mini launched on March 17, 2026, and is available in Codex, but the openai-codex OAuth provider did not resolve it. The openai (API key) provider was updated in #49289; this completes coverage for openai-codex OAuth users.

Note: gpt-5.4-nano is API-only per OpenAI and is NOT available via Codex OAuth, so it is intentionally excluded from this change.

Changes:
- extensions/openai/openai-codex-provider.ts: add mini to forward-compat resolution, xhigh model ids, modern model ids, and catalog augmentation
- src/plugins/provider-catalog-metadata.ts: add bundled catalog entry for openai-codex mini
- Tests: runtime contract, provider-runtime, model-compat, pi-embedded-runner, model-catalog, directive behavior, and test-support/harness updates

Closes #37623 (for openai-codex mini gap)
parent ca13256913
commit fa699c157d
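The mechanism behind "forward-compat resolution" is template cloning: when a model id is not in the bundled catalog, the provider resolves it by copying the newest matching Codex template and rewriting its identity fields. A minimal sketch of that idea, where the `ModelTemplate` shape and the `findTemplate` callback are illustrative stand-ins rather than the provider's real internals:

```ts
// Minimal sketch of the forward-compat idea; not the actual provider code.
// Assumed/illustrative names: ModelTemplate, findTemplate.
type ModelTemplate = {
  id: string;
  name: string;
  contextWindow: number;
  maxTokens: number;
};

const GPT_54_MINI_ID = "gpt-5.4-mini";
// Prefer the newest known Codex model as the template base (as in the diff below).
const GPT_54_MINI_TEMPLATE_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;

function resolveForwardCompat(
  modelId: string,
  findTemplate: (id: string) => ModelTemplate | null,
): ModelTemplate | null {
  if (modelId.toLowerCase() !== GPT_54_MINI_ID) return null;
  for (const templateId of GPT_54_MINI_TEMPLATE_IDS) {
    const template = findTemplate(templateId);
    // Clone the template, then overwrite the identity fields.
    if (template) return { ...template, id: modelId, name: modelId };
  }
  return null;
}
```

The real resolver additionally patches `contextWindow`/`maxTokens` for some ids, as the provider diff below shows for gpt-5.4.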
@@ -33,6 +33,7 @@ Docs: https://docs.openclaw.ai
 - Browser/existing-session: support `browser.profiles.<name>.userDataDir` so Chrome DevTools MCP can attach to Brave, Edge, and other Chromium-based browsers through their own user data directories. (#48170) Thanks @velvet-shark.
 - Skills/prompt budget: preserve all registered skills via a compact catalog fallback before dropping entries when the full prompt format exceeds `maxSkillsPromptChars`. (#47553) Thanks @snese.
 - Models/OpenAI: add native forward-compat support for `gpt-5.4-mini` and `gpt-5.4-nano` in the OpenAI provider catalog, runtime resolution, and reasoning capability gates. Thanks @vincentkoc.
+- Models/OpenAI Codex: add `gpt-5.4-mini` support for the `openai-codex` OAuth provider — forward-compat resolution, catalog augmentation, xhigh thinking, and modern model filtering. (`gpt-5.4-nano` is API-only and not available via Codex OAuth.) Thanks @LittleMeHere.
 - Plugins/bundles: make enabled bundle MCP servers expose runnable tools in embedded Pi, and default relative bundle MCP launches to the bundle root so marketplace bundles like Context7 work through Pi instead of stopping at config import.
 - Scope message SecretRef resolution and harden doctor/status paths. (#48728) Thanks @joshavant.
 - Plugins/testing: add a public `openclaw/plugin-sdk/testing` surface for plugin-author test helpers, and move bundled-extension-only test bridges out of `extensions/` into private repo test helpers.
@@ -31,9 +31,11 @@ import {
 const PROVIDER_ID = "openai-codex";
 const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
 const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
+const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
 const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
 const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
+const OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
 const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
 const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
 const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
|
||||
@@ -42,6 +44,7 @@ const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
 const OPENAI_CODEX_DEFAULT_MODEL = `${PROVIDER_ID}/${OPENAI_CODEX_GPT_54_MODEL_ID}`;
 const OPENAI_CODEX_XHIGH_MODEL_IDS = [
   OPENAI_CODEX_GPT_54_MODEL_ID,
+  OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
   OPENAI_CODEX_GPT_53_MODEL_ID,
   OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
   "gpt-5.2-codex",
|
||||
@@ -49,6 +52,7 @@ const OPENAI_CODEX_XHIGH_MODEL_IDS = [
 ] as const;
 const OPENAI_CODEX_MODERN_MODEL_IDS = [
   OPENAI_CODEX_GPT_54_MODEL_ID,
+  OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
   "gpt-5.2",
   "gpt-5.2-codex",
   OPENAI_CODEX_GPT_53_MODEL_ID,
|
||||
@@ -99,6 +103,8 @@ function resolveCodexForwardCompatModel(
       contextWindow: OPENAI_CODEX_GPT_54_CONTEXT_TOKENS,
       maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
     };
+  } else if (lower === OPENAI_CODEX_GPT_54_MINI_MODEL_ID) {
+    templateIds = OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS;
   } else if (lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID) {
     templateIds = [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS];
     patch = {
|
||||
@@ -266,6 +272,11 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
     providerId: PROVIDER_ID,
     templateIds: OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
   });
+  const gpt54MiniTemplate = findCatalogTemplate({
+    entries: ctx.entries,
+    providerId: PROVIDER_ID,
+    templateIds: OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS,
+  });
   const sparkTemplate = findCatalogTemplate({
     entries: ctx.entries,
     providerId: PROVIDER_ID,
|
||||
@@ -279,6 +290,13 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
           name: OPENAI_CODEX_GPT_54_MODEL_ID,
         }
       : undefined,
+    gpt54MiniTemplate
+      ? {
+          ...gpt54MiniTemplate,
+          id: OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
+          name: OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
+        }
+      : undefined,
     sparkTemplate
       ? {
           ...sparkTemplate,
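Both the provider plugin above and the bundled-catalog change further below apply the same augmentation pattern: find a template entry, clone it under the new id when present, and drop the slot otherwise. A condensed sketch of that pattern, with an illustrative `CatalogEntry` shape and `augment` helper name:

```ts
// Condensed sketch of the template-clone pattern; the CatalogEntry shape
// and augment() name are illustrative, not the repo's actual types.
type CatalogEntry = { provider: string; id: string; name: string };

function augment(entries: CatalogEntry[]): CatalogEntry[] {
  // Look up the preferred template entry for this provider.
  const template = entries.find(
    (entry) => entry.provider === "openai-codex" && entry.id === "gpt-5.3-codex",
  );
  // Clone it under the new id when found; otherwise the slot stays undefined.
  const candidates = [
    template ? { ...template, id: "gpt-5.4-mini", name: "gpt-5.4-mini" } : undefined,
  ];
  // Missing templates yield undefined slots, filtered out before merging.
  return [
    ...entries,
    ...candidates.filter((entry): entry is CatalogEntry => entry !== undefined),
  ];
}
```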
@@ -244,6 +244,13 @@ describe("loadModelCatalog", () => {
         name: "gpt-5.4",
       }),
     );
+    expect(result).toContainEqual(
+      expect.objectContaining({
+        provider: "openai-codex",
+        id: "gpt-5.4-mini",
+        name: "gpt-5.4-mini",
+      }),
+    );
   });
 
   it("merges configured models for opted-in non-pi-native providers", async () => {
@@ -350,7 +350,7 @@ describe("isModernModelRef", () => {
       provider === "openai" &&
       ["gpt-5.4", "gpt-5.4-pro", "gpt-5.4-mini", "gpt-5.4-nano"].includes(context.modelId)
         ? true
-        : provider === "openai-codex" && context.modelId === "gpt-5.4"
+        : provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-mini"].includes(context.modelId)
           ? true
           : provider === "opencode" && ["claude-opus-4-6", "gemini-3-pro"].includes(context.modelId)
             ? true
@@ -364,6 +364,7 @@ describe("isModernModelRef", () => {
     expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-mini" })).toBe(true);
     expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-nano" })).toBe(true);
     expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4" })).toBe(true);
+    expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4-mini" })).toBe(true);
     expect(isModernModelRef({ provider: "opencode", id: "claude-opus-4-6" })).toBe(true);
     expect(isModernModelRef({ provider: "opencode", id: "gemini-3-pro" })).toBe(true);
     expect(isModernModelRef({ provider: "opencode-go", id: "kimi-k2.5" })).toBe(true);
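The asymmetry these expectations encode is deliberate: `gpt-5.4-nano` is modern for the `openai` provider but absent from the `openai-codex` list, because nano is API-only. A table-driven sketch of that check; the real `isModernModelRef` is the ternary chain mocked above, so this shape is purely illustrative:

```ts
// Illustrative table-driven sketch; the real isModernModelRef differs.
// The per-provider id lists mirror the test expectations above.
const MODERN_IDS_BY_PROVIDER: Record<string, readonly string[]> = {
  openai: ["gpt-5.4", "gpt-5.4-pro", "gpt-5.4-mini", "gpt-5.4-nano"],
  "openai-codex": ["gpt-5.4", "gpt-5.4-mini"], // no nano: API-only per OpenAI
};

function isModernModelRefSketch(ref: { provider: string; id: string }): boolean {
  return MODERN_IDS_BY_PROVIDER[ref.provider]?.includes(ref.id) ?? false;
}
```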
@@ -666,6 +666,15 @@ describe("resolveModel", () => {
     expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
   });
 
+  it("builds an openai-codex fallback for gpt-5.4-mini", () => {
+    mockOpenAICodexTemplateModel();
+
+    const result = resolveModel("openai-codex", "gpt-5.4-mini", "/tmp/agent");
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4-mini"));
+  });
+
   it("builds an openai-codex fallback for gpt-5.3-codex-spark", () => {
     mockOpenAICodexTemplateModel();
 
@@ -244,7 +244,7 @@ describe("directive behavior", () => {
 
     const unsupportedModelTexts = await runThinkingDirective(home, "openai/gpt-4.1-mini");
     expect(unsupportedModelTexts).toContain(
-      'Thinking level "xhigh" is only supported for openai/gpt-5.4, openai/gpt-5.4-pro, openai/gpt-5.4-mini, openai/gpt-5.4-nano, openai/gpt-5.2, openai-codex/gpt-5.4, openai-codex/gpt-5.3-codex, openai-codex/gpt-5.3-codex-spark, openai-codex/gpt-5.2-codex, openai-codex/gpt-5.1-codex, github-copilot/gpt-5.2-codex or github-copilot/gpt-5.2.',
+      'Thinking level "xhigh" is only supported for openai/gpt-5.4, openai/gpt-5.4-pro, openai/gpt-5.4-mini, openai/gpt-5.4-nano, openai/gpt-5.2, openai-codex/gpt-5.4, openai-codex/gpt-5.4-mini, openai-codex/gpt-5.3-codex, openai-codex/gpt-5.3-codex-spark, openai-codex/gpt-5.2-codex, openai-codex/gpt-5.1-codex, github-copilot/gpt-5.2-codex or github-copilot/gpt-5.2.',
     );
     expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
   });
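The error message above comes from an allow-list gate on the `xhigh` thinking level; with this change, `openai-codex/gpt-5.4-mini` joins that list. A hedged sketch of such a gate, where the constant and function names are illustrative and only the id list mirrors the provider constants and directive message above:

```ts
// Illustrative gate; only the id list mirrors OPENAI_CODEX_XHIGH_MODEL_IDS
// and the directive message above (which also names gpt-5.1-codex).
const XHIGH_CODEX_MODEL_IDS = [
  "gpt-5.4",
  "gpt-5.4-mini",
  "gpt-5.3-codex",
  "gpt-5.3-codex-spark",
  "gpt-5.2-codex",
  "gpt-5.1-codex",
] as const;

function supportsXhighThinking(provider: string, modelId: string): boolean {
  return (
    provider === "openai-codex" &&
    (XHIGH_CODEX_MODEL_IDS as readonly string[]).includes(modelId)
  );
}

// e.g. supportsXhighThinking("openai-codex", "gpt-5.4-mini") === true
```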
@@ -587,6 +587,31 @@ describe("provider runtime contract", () => {
     });
   });
 
+  it("owns openai-codex gpt-5.4 mini forward-compat resolution", () => {
+    const provider = requireProviderContractProvider("openai-codex");
+    const model = provider.resolveDynamicModel?.({
+      provider: "openai-codex",
+      modelId: "gpt-5.4-mini",
+      modelRegistry: {
+        find: (_provider: string, id: string) =>
+          id === "gpt-5.2-codex"
+            ? createModel({
+                id,
+                api: "openai-codex-responses",
+                provider: "openai-codex",
+                baseUrl: "https://chatgpt.com/backend-api",
+              })
+            : null,
+      } as never,
+    });
+
+    expect(model).toMatchObject({
+      id: "gpt-5.4-mini",
+      provider: "openai-codex",
+      api: "openai-codex-responses",
+    });
+  });
+
   it("owns codex transport defaults", () => {
     const provider = requireProviderContractProvider("openai-codex");
     expect(
@@ -53,6 +53,11 @@ export function augmentBundledProviderCatalog(
     providerId: OPENAI_CODEX_PROVIDER_ID,
     templateIds: ["gpt-5.3-codex", "gpt-5.2-codex"],
   });
+  const openAiCodexGpt54MiniTemplate = findCatalogTemplate({
+    entries: context.entries,
+    providerId: OPENAI_CODEX_PROVIDER_ID,
+    templateIds: ["gpt-5.3-codex", "gpt-5.2-codex"],
+  });
   const openAiCodexSparkTemplate = findCatalogTemplate({
     entries: context.entries,
     providerId: OPENAI_CODEX_PROVIDER_ID,
@@ -95,6 +100,13 @@ export function augmentBundledProviderCatalog(
           name: "gpt-5.4",
         }
       : undefined,
+    openAiCodexGpt54MiniTemplate
+      ? {
+          ...openAiCodexGpt54MiniTemplate,
+          id: "gpt-5.4-mini",
+          name: "gpt-5.4-mini",
+        }
+      : undefined,
     openAiCodexSparkTemplate
       ? {
           ...openAiCodexSparkTemplate,
@@ -14,6 +14,7 @@ export const expectedAugmentedOpenaiCodexCatalogEntries = [
   { provider: "openai", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
   { provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
   { provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
+  { provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
   {
     provider: "openai-codex",
     id: "gpt-5.3-codex-spark",
@@ -217,6 +217,7 @@ describe("provider-runtime", () => {
   { provider: "openai", id: "gpt-5.4", name: "gpt-5.4" },
   { provider: "openai", id: "gpt-5.4-pro", name: "gpt-5.4-pro" },
   { provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
+  { provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
   {
     provider: "openai-codex",
     id: "gpt-5.3-codex-spark",
@@ -483,6 +484,7 @@ describe("provider-runtime", () => {
   { provider: "openai", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
   { provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
   { provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
+  { provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
   {
     provider: "openai-codex",
     id: "gpt-5.3-codex-spark",