Compare commits

...

3 Commits

Author SHA1 Message Date
Peter Steinberger
8a91af22a5 fix: clean up codex inline model api fallback (#39753) (thanks @justinhuangcode) 2026-03-08 13:51:18 +00:00
justinhuangcode
e4bfcff5a8 chore: update secrets baseline line numbers 2026-03-08 13:49:02 +00:00
justinhuangcode
c42dc2e8c2 fix(agents): let forward-compat resolve api when inline model omits it
When a user configures `models.providers.openai-codex` with a models
array but omits the `api` field, `buildInlineProviderModels` produces
an entry with `api: undefined`. The inline-match early return then
hands this incomplete model straight to the caller, skipping the
forward-compat resolver that would supply the correct
`openai-codex-responses` api — causing a crash loop.

Let the inline match fall through to forward-compat when `api` is
absent so the resolver chain can fill it in.

Fixes #39682
2026-03-08 13:49:02 +00:00
5 changed files with 33 additions and 6 deletions

View File

@ -11583,7 +11583,7 @@
"filename": "src/agents/pi-embedded-runner/model.ts",
"hashed_secret": "e774aaeac31c6272107ba89080295e277050fa7c",
"is_verified": false,
"line_number": 267
"line_number": 272
}
],
"src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts": [

View File

@ -14,6 +14,7 @@ Docs: https://docs.openclaw.ai
- Mattermost replies: keep `root_id` pinned to the existing thread root when an agent replies inside a thread, while still using reply-target threading for top-level posts. (#27744) thanks @hnykda.
- Agents/failover: detect Amazon Bedrock `Too many tokens per day` quota errors as rate limits across fallback, cron retry, and memory embeddings while keeping context-window `too many tokens per request` errors out of the rate-limit lane. (#39377) Thanks @gambletan.
- Android/Play distribution: remove self-update, background location, `screen.record`, and background mic capture from the Android app, narrow the foreground service to `dataSync` only, and clean up the legacy `location.enabledMode=always` preference migration. (#39660) Thanks @obviyus.
- Agents/openai-codex model resolution: fall through from inline `openai-codex` model entries without an `api` so GPT-5.4 keeps the codex transport and still preserves configured `baseUrl` and headers. (#39753) Thanks @justinhuangcode.
## 2026.3.7

View File

@ -22,7 +22,7 @@ enum HostEnvSecurityPolicy {
"PS4",
"GCONV_PATH",
"IFS",
"SSLKEYLOGFILE",
"SSLKEYLOGFILE"
]
static let blockedOverrideKeys: Set<String> = [
@ -50,17 +50,17 @@ enum HostEnvSecurityPolicy {
"OPENSSL_ENGINES",
"PYTHONSTARTUP",
"WGETRC",
"CURL_HOME",
"CURL_HOME"
]
static let blockedOverridePrefixes: [String] = [
"GIT_CONFIG_",
"NPM_CONFIG_",
"NPM_CONFIG_"
]
static let blockedPrefixes: [String] = [
"DYLD_",
"LD_",
"BASH_FUNC_",
"BASH_FUNC_"
]
}

View File

@ -638,6 +638,32 @@ describe("resolveModel", () => {
});
});
it("uses codex fallback when inline model omits api (#39682)", () => {
mockOpenAICodexTemplateModel();
const cfg: OpenClawConfig = {
models: {
providers: {
"openai-codex": {
baseUrl: "https://custom.example.com",
headers: { "X-Custom-Auth": "token-123" },
models: [{ id: "gpt-5.4" }],
},
},
},
} as unknown as OpenClawConfig;
const result = resolveModel("openai-codex", "gpt-5.4", "/tmp/agent", cfg);
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
api: "openai-codex-responses",
baseUrl: "https://custom.example.com",
headers: { "X-Custom-Auth": "token-123" },
id: "gpt-5.4",
provider: "openai-codex",
});
});
it("includes auth hint for unknown ollama models (#17328)", () => {
// resetMockDiscoverModels() in beforeEach already sets find → null
const result = resolveModel("ollama", "gemma3:4b", "/tmp/agent");

View File

@ -160,7 +160,7 @@ export function resolveModelWithRegistry(params: {
const inlineMatch = inlineModels.find(
(entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
);
if (inlineMatch) {
if (inlineMatch?.api) {
return normalizeModelCompat(inlineMatch as Model<Api>);
}