Compare commits


18 Commits

Author SHA1 Message Date
Vincent Koc
d815c8eac6 Tests: stabilize cron wake-mode legacy-delivery coverage 2026-03-06 01:07:06 -05:00
Vincent Koc
a6168ce147 Gateway: normalize explicit deliver-route boolean 2026-03-06 00:58:09 -05:00
Vincent Koc
c3f4250442 Gateway: guard base64 image size accounting 2026-03-06 00:57:57 -05:00
Vincent Koc
298f9a4f60 Tests: fix models list e2e registry mock 2026-03-06 00:55:43 -05:00
Vincent Koc
e21f512e5e Tests: remove spark runner fallback case 2026-03-06 00:55:43 -05:00
Vincent Koc
9f72c81823 Tests: remove spark model compat case 2026-03-06 00:55:43 -05:00
Vincent Koc
176ac942ce Tests: remove spark runner compat case 2026-03-06 00:55:43 -05:00
Vincent Koc
670805cab4 Models: drop codex spark follow-up scope 2026-03-06 00:55:43 -05:00
Vincent Koc
8efaa547dd Models: resolve GPT-5.3 Codex Spark fallback 2026-03-06 00:55:43 -05:00
Vincent Koc
dfbde683ce Tests: trim redundant GPT-5.4 overlap coverage 2026-03-06 00:55:43 -05:00
Vincent Koc
e16aa9ccf7 Tests: align GPT-5.4 fallback pricing expectations 2026-03-06 00:55:43 -05:00
Vincent Koc
67d8bb2741 Tests: wait for GPT-5.4 context warmup 2026-03-06 00:55:43 -05:00
Vincent Koc
4bafa01d63 Tests: fix OpenAI provider fixture shape 2026-03-06 00:55:43 -05:00
Vincent Koc
a13284ccd8 Agents: fix context registry typing 2026-03-06 00:55:43 -05:00
Vincent Koc
3b0779a75a Models: address GPT-5.4 review feedback 2026-03-06 00:55:42 -05:00
Vincent Koc
48adb47ec3 Tests: cover GPT-5.4 raw registry fallbacks 2026-03-06 00:55:42 -05:00
Vincent Koc
c8dbd73fbd Models: augment raw registry GPT-5.4 fallbacks 2026-03-06 00:55:42 -05:00
Vincent Koc
b7776bb97e Models: mark GPT-5.4 as modern and xhigh 2026-03-06 00:55:42 -05:00
10 changed files with 281 additions and 45 deletions

View File

@@ -34,6 +34,38 @@ describe("lookupContextTokens", () => {
expect(lookupContextTokens("openrouter/claude-sonnet")).toBe(321_000);
});
it("adds GPT-5.4 context windows from forward-compat discovery fallbacks", async () => {
vi.doMock("../config/config.js", () => ({
loadConfig: () => ({
models: { providers: {} },
}),
}));
vi.doMock("./models-config.js", () => ({
ensureOpenClawModelsJson: vi.fn(async () => {}),
}));
vi.doMock("./agent-paths.js", () => ({
resolveOpenClawAgentDir: () => "/tmp/openclaw-agent",
}));
vi.doMock("./pi-model-discovery.js", () => ({
discoverAuthStorage: vi.fn(() => ({})),
discoverModels: vi.fn(() => ({
getAll: () => [
{
provider: "openai",
id: "gpt-5.2",
contextWindow: 400_000,
},
],
})),
}));
const { lookupContextTokens } = await import("./context.js");
await vi.waitFor(() => {
expect(lookupContextTokens("gpt-5.4")).toBe(1_050_000);
expect(lookupContextTokens("gpt-5.4-pro")).toBe(1_050_000);
});
});
it("does not skip eager warmup when --profile is followed by -- terminator", async () => {
const loadConfigMock = vi.fn(() => ({ models: {} }));
vi.doMock("../config/config.js", () => ({

View File

@@ -1,17 +1,19 @@
// Lazy-load pi-coding-agent model metadata so we can infer context windows when
// the agent reports a model id. This includes custom models.json entries.
import type { Api, Model } from "@mariozechner/pi-ai";
import { loadConfig } from "../config/config.js";
import type { OpenClawConfig } from "../config/config.js";
import { computeBackoff, type BackoffPolicy } from "../infra/backoff.js";
import { consumeRootOptionToken, FLAG_TERMINATOR } from "../infra/cli-root-options.js";
import { resolveOpenClawAgentDir } from "./agent-paths.js";
import { augmentKnownForwardCompatModels } from "./model-forward-compat.js";
import { ensureOpenClawModelsJson } from "./models-config.js";
type ModelEntry = { id: string; contextWindow?: number };
type ModelEntry = { id: string; provider?: string; contextWindow?: number };
type ModelRegistryLike = {
getAvailable?: () => ModelEntry[];
getAll: () => ModelEntry[];
getAvailable?: () => Model<Api>[];
getAll: () => Model<Api>[];
};
type ConfigModelEntry = { id?: string; contextWindow?: number };
type ProviderConfigEntry = { models?: ConfigModelEntry[] };
@@ -156,10 +158,11 @@ function ensureContextWindowCacheLoaded(): Promise<void> {
const agentDir = resolveOpenClawAgentDir();
const authStorage = discoverAuthStorage(agentDir);
const modelRegistry = discoverModels(authStorage, agentDir) as unknown as ModelRegistryLike;
const models =
const models = augmentKnownForwardCompatModels(
typeof modelRegistry.getAvailable === "function"
? modelRegistry.getAvailable()
: modelRegistry.getAll();
: modelRegistry.getAll(),
);
applyDiscoveredContextWindows({
cache: MODEL_CACHE,
models,
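
Note: with this change, both the context-window cache here and loadModelRegistry (later in this compare) funnel discovered models through augmentKnownForwardCompatModels. A minimal sketch of the observable effect, assuming a registry that only reports gpt-5.2 (mirroring the test fixtures in this compare):

import type { Api, Model } from "@mariozechner/pi-ai";
import { augmentKnownForwardCompatModels } from "./model-forward-compat.js";

// Registry knows only gpt-5.2; the cast stands in for a full Model<Api> fixture.
const discovered = [
  { provider: "openai", id: "gpt-5.2", contextWindow: 400_000 },
] as unknown as Model<Api>[];

const augmented = augmentKnownForwardCompatModels(discovered);
// Synthesized gpt-5.4 and gpt-5.4-pro entries are appended, each with a
// 1,050,000-token context window (see the test expectations above).
console.log(augmented.map((m) => `${m.provider}/${m.id}`));
// => ["openai/gpt-5.2", "openai/gpt-5.4", "openai/gpt-5.4-pro"]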

View File

@@ -340,7 +340,7 @@ describe("resolveForwardCompatModel", () => {
expect(model?.reasoning).toBe(true);
expect(model?.contextWindow).toBe(1_050_000);
expect(model?.maxTokens).toBe(128_000);
expect(model?.cost).toEqual({ input: 0, output: 0, cacheRead: 0, cacheWrite: 0 });
expect(model?.cost).toEqual({ input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 });
});
it("resolves openai gpt-5.4-pro via template fallback", () => {
@@ -353,6 +353,7 @@ describe("resolveForwardCompatModel", () => {
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
expect(model?.contextWindow).toBe(1_050_000);
expect(model?.maxTokens).toBe(128_000);
expect(model?.cost).toEqual({ input: 30, output: 180, cacheRead: 0, cacheWrite: 0 });
});
it("resolves openai-codex gpt-5.4 via codex template fallback", () => {

View File

@@ -10,6 +10,9 @@ const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
const OPENAI_GPT_54_COST = { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 } as const;
// OpenAI currently publishes no cached-input price for GPT-5.4 Pro.
const OPENAI_GPT_54_PRO_COST = { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 } as const;
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
@@ -55,35 +58,23 @@ function resolveOpenAIGpt54ForwardCompatModel(
return undefined;
}
return (
cloneFirstTemplateModel({
normalizedProvider,
trimmedModelId,
templateIds: [...templateIds],
modelRegistry,
patch: {
api: "openai-responses",
provider: normalizedProvider,
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_GPT_54_MAX_TOKENS,
},
}) ??
normalizeModelCompat({
id: trimmedModelId,
name: trimmedModelId,
const template = cloneFirstTemplateModel({
normalizedProvider,
trimmedModelId,
templateIds: [...templateIds],
modelRegistry,
patch: {
api: "openai-responses",
provider: normalizedProvider,
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_GPT_54_MAX_TOKENS,
} as Model<Api>)
);
},
});
return buildOpenAIGpt54FallbackModel(trimmedModelId, template);
}
function cloneFirstTemplateModel(params: {
@@ -109,6 +100,144 @@ function cloneFirstTemplateModel(params: {
return undefined;
}
function cloneSyntheticTemplateModel(params: {
models: Model<Api>[];
normalizedProvider: string;
trimmedModelId: string;
templateIds: readonly string[];
patch?: Partial<Model<Api>>;
}): Model<Api> | undefined {
const { models, normalizedProvider, trimmedModelId, templateIds, patch } = params;
for (const templateId of [...new Set(templateIds)].filter(Boolean)) {
const template =
models.find(
(model) =>
normalizeProviderId(model.provider) === normalizedProvider &&
model.id.trim().toLowerCase() === templateId.toLowerCase(),
) ?? null;
if (!template) {
continue;
}
return normalizeModelCompat({
...template,
id: trimmedModelId,
name: trimmedModelId,
...patch,
} as Model<Api>);
}
return undefined;
}
function buildOpenAIGpt54FallbackModel(modelId: string, template?: Model<Api> | null): Model<Api> {
return normalizeModelCompat({
...template,
id: modelId,
name: modelId,
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
cost:
modelId.toLowerCase() === OPENAI_GPT_54_PRO_MODEL_ID
? OPENAI_GPT_54_PRO_COST
: OPENAI_GPT_54_COST,
contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_GPT_54_MAX_TOKENS,
} as Model<Api>);
}
export function augmentKnownForwardCompatModels(models: Model<Api>[]): Model<Api>[] {
const next = [...models];
const existing = new Set(
next.map((model) => `${normalizeProviderId(model.provider)}::${model.id.trim().toLowerCase()}`),
);
const hasProvider = (provider: string) =>
next.some((model) => normalizeProviderId(model.provider) === provider);
const pushIfMissing = (provider: string, id: string, model: Model<Api> | undefined) => {
const key = `${normalizeProviderId(provider)}::${id.trim().toLowerCase()}`;
if (existing.has(key) || !model) {
return;
}
next.push(model);
existing.add(key);
};
if (hasProvider("openai")) {
pushIfMissing(
"openai",
OPENAI_GPT_54_MODEL_ID,
buildOpenAIGpt54FallbackModel(
OPENAI_GPT_54_MODEL_ID,
cloneSyntheticTemplateModel({
models: next,
normalizedProvider: "openai",
trimmedModelId: OPENAI_GPT_54_MODEL_ID,
templateIds: OPENAI_GPT_54_TEMPLATE_MODEL_IDS,
patch: {
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_GPT_54_MAX_TOKENS,
},
}),
),
);
pushIfMissing(
"openai",
OPENAI_GPT_54_PRO_MODEL_ID,
buildOpenAIGpt54FallbackModel(
OPENAI_GPT_54_PRO_MODEL_ID,
cloneSyntheticTemplateModel({
models: next,
normalizedProvider: "openai",
trimmedModelId: OPENAI_GPT_54_PRO_MODEL_ID,
templateIds: OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS,
patch: {
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_GPT_54_MAX_TOKENS,
},
}),
),
);
}
if (hasProvider("openai-codex")) {
pushIfMissing(
"openai-codex",
OPENAI_CODEX_GPT_54_MODEL_ID,
cloneSyntheticTemplateModel({
models: next,
normalizedProvider: "openai-codex",
trimmedModelId: OPENAI_CODEX_GPT_54_MODEL_ID,
templateIds: OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
}) ??
normalizeModelCompat({
id: OPENAI_CODEX_GPT_54_MODEL_ID,
name: OPENAI_CODEX_GPT_54_MODEL_ID,
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: DEFAULT_CONTEXT_TOKENS,
maxTokens: DEFAULT_CONTEXT_TOKENS,
} as Model<Api>),
);
}
return next;
}
const CODEX_GPT54_ELIGIBLE_PROVIDERS = new Set(["openai-codex"]);
const CODEX_GPT53_ELIGIBLE_PROVIDERS = new Set(["openai-codex", "github-copilot"]);
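
Note: the cost tables above appear to follow pi-ai's per-million-token USD convention (an assumption; the diff does not restate the unit). A hypothetical helper, not part of this change, showing how the GPT-5.4 table would price a request under that assumption:

const OPENAI_GPT_54_COST = { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 } as const;

// Hypothetical: convert token counts into USD, assuming per-million-token rates.
function estimateUsd(tokens: { input: number; output: number; cacheRead: number }): number {
  return (
    (tokens.input * OPENAI_GPT_54_COST.input +
      tokens.output * OPENAI_GPT_54_COST.output +
      tokens.cacheRead * OPENAI_GPT_54_COST.cacheRead) /
    1_000_000
  );
}

// 100k input + 10k output + 50k cached-read tokens ≈ 0.25 + 0.15 + 0.0125 USD
console.log(estimateUsd({ input: 100_000, output: 10_000, cacheRead: 50_000 })); // 0.4125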

View File

@@ -251,7 +251,6 @@ describe("resolveModel", () => {
expect(result.model?.contextWindow).toBe(262144);
expect(result.model?.maxTokens).toBe(32768);
});
it("propagates reasoning from matching configured fallback model", () => {
const cfg = {
models: {
@@ -446,6 +445,30 @@ describe("resolveModel", () => {
});
});
it("uses GPT-5.4 Pro pricing when cloning an older openai template", () => {
mockDiscoveredModel({
provider: "openai",
modelId: "gpt-5.2",
templateModel: buildForwardCompatTemplate({
id: "gpt-5.2",
name: "GPT-5.2",
provider: "openai",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
}),
});
const result = resolveModel("openai", "gpt-5.4-pro", "/tmp/agent");
expect(result.error).toBeUndefined();
expect(result.model?.cost).toEqual({
input: 30,
output: 180,
cacheRead: 0,
cacheWrite: 0,
});
});
it("builds an anthropic forward-compat fallback for claude-opus-4-6", () => {
mockDiscoveredModel({
provider: "anthropic",

View File

@@ -87,6 +87,15 @@ vi.mock("../agents/pi-embedded-runner/model.js", () => ({
resolveModel: () => {
throw new Error("resolveModel should not be called from models.list tests");
},
resolveModelWithRegistry: ({
provider,
modelId,
modelRegistry,
}: {
provider: string;
modelId: string;
modelRegistry: { find: (provider: string, modelId: string) => unknown };
}) => modelRegistry.find(provider, modelId),
}));
function makeRuntime() {
@@ -264,6 +273,42 @@ describe("models list/status", () => {
expect(payload.models[0]?.available).toBe(false);
});
it("loadModelRegistry augments raw OpenAI models with GPT-5.4 fallbacks", async () => {
modelRegistryState.models = [
{
provider: "openai",
id: "gpt-5.2",
name: "GPT-5.2",
api: "openai-responses",
input: ["text", "image"],
baseUrl: "https://api.openai.com/v1",
contextWindow: 400_000,
maxTokens: 128_000,
reasoning: true,
cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
},
];
modelRegistryState.available = modelRegistryState.models;
const { models } = await loadModelRegistry({});
expect(models).toEqual(
expect.arrayContaining([
expect.objectContaining({
provider: "openai",
id: "gpt-5.4",
api: "openai-responses",
contextWindow: 1_050_000,
}),
expect.objectContaining({
provider: "openai",
id: "gpt-5.4-pro",
api: "openai-responses",
contextWindow: 1_050_000,
}),
]),
);
});
it("models list does not treat availability-unavailable code as discovery fallback", async () => {
configureGoogleAntigravityModel("claude-opus-4-6-thinking");
modelRegistryState.getAllError = Object.assign(new Error("model discovery failed"), {

View File

@@ -8,6 +8,7 @@ import {
resolveAwsSdkEnvVarName,
resolveEnvApiKey,
} from "../../agents/model-auth.js";
import { augmentKnownForwardCompatModels } from "../../agents/model-forward-compat.js";
import { ensureOpenClawModelsJson } from "../../agents/models-config.js";
import { discoverAuthStorage, discoverModels } from "../../agents/pi-model-discovery.js";
import type { OpenClawConfig } from "../../config/config.js";
@@ -99,7 +100,7 @@ export async function loadModelRegistry(cfg: OpenClawConfig) {
const agentDir = resolveOpenClawAgentDir();
const authStorage = discoverAuthStorage(agentDir);
const registry = discoverModels(authStorage, agentDir);
const models = registry.getAll();
const models = augmentKnownForwardCompatModels(registry.getAll());
let availableKeys: Set<string> | undefined;
let availabilityErrorMessage: string | undefined;

View File

@@ -192,7 +192,8 @@ vi.mock("node:fs/promises", async (importOriginal) => {
beforeEach(() => {
fsState.entries.clear();
fsState.nowMs = 0;
fsState.fixtureCount = 0;
// Keep fixture paths monotonic across tests so late async writes from a
// previous CronService instance cannot collide with a recycled fake store.
ensureDir(fixturesRoot);
});
@@ -540,18 +541,14 @@ describe("CronService", () => {
const job = await addWakeModeNowMainSystemEventJob(cron, { name: "wakeMode now waits" });
const runPromise = cron.run(job.id, "force");
// `cron.run()` now persists the running marker before executing the job.
// Allow more microtask turns so the post-lock execution can start.
for (let i = 0; i < 500; i++) {
if (runHeartbeatOnce.mock.calls.length > 0) {
break;
}
// Let the locked() chain progress.
await Promise.resolve();
}
// `cron.run()` executes after releasing the persistence lock, so wait for
// the heartbeat runner to observe the started job instead of hand-spinning
// microtasks. This keeps the assertion stable across runtimes.
await vi.waitFor(() => {
expect(runHeartbeatOnce).toHaveBeenCalledTimes(1);
expect(requestHeartbeatNow).not.toHaveBeenCalled();
});
expect(runHeartbeatOnce).toHaveBeenCalledTimes(1);
expect(requestHeartbeatNow).not.toHaveBeenCalled();
expectMainSystemEventPosted(enqueueSystemEvent, "hello");
expect(job.state.runningAtMs).toBeTypeOf("number");
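
Note: vi.waitFor polls its callback until it stops throwing (Vitest defaults: 1000 ms timeout, 50 ms interval), which is why it replaces the hand-rolled microtask spin above. A reduced sketch of the pattern with hypothetical names:

import { expect, it, vi } from "vitest";

it("waits for an async side effect instead of spinning microtasks", async () => {
  let heartbeats = 0; // hypothetical stand-in for runHeartbeatOnce's call count
  setTimeout(() => {
    heartbeats += 1;
  }, 20);
  // Retries the assertion until it passes or the timeout elapses.
  await vi.waitFor(() => expect(heartbeats).toBe(1), { timeout: 1_000, interval: 10 });
});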

View File

@@ -300,7 +300,11 @@ async function resolveImagesForRequest(
for (const url of urls) {
const source = parseImageUrlToSource(url);
if (source.type === "base64") {
totalBytes += estimateBase64DecodedBytes(source.data);
const base64Data = source.data;
if (typeof base64Data !== "string") {
throw new Error("image_url data URI is missing payload data");
}
totalBytes += estimateBase64DecodedBytes(base64Data);
if (totalBytes > limits.maxTotalImageBytes) {
throw new Error(
`Total image payload too large (${totalBytes}; limit ${limits.maxTotalImageBytes})`,
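
Note: estimateBase64DecodedBytes itself is not part of this diff; a plausible shape, sketched here as an assumption, derives the decoded size arithmetically (base64 packs 3 bytes into 4 characters, minus '=' padding) so the guard never has to materialize the buffer:

// Sketch only; the real estimateBase64DecodedBytes is defined elsewhere.
function estimateBase64DecodedBytes(data: string): number {
  const padding = data.endsWith("==") ? 2 : data.endsWith("=") ? 1 : 0;
  return Math.max(0, Math.floor((data.length * 3) / 4) - padding);
}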

View File

@@ -906,13 +906,14 @@ export const chatHandlers: GatewayRequestHandlers = {
(isChannelScopedSession || hasLegacyChannelPeerShape)) ||
(isConfiguredMainSessionScope && client?.connect !== undefined && !isFromWebchatClient)),
);
const hasDeliverableRoute =
const hasDeliverableRoute = Boolean(
shouldDeliverExternally &&
canInheritDeliverableRoute &&
routeChannelCandidate &&
routeChannelCandidate !== INTERNAL_MESSAGE_CHANNEL &&
typeof routeToCandidate === "string" &&
routeToCandidate.trim().length > 0;
routeToCandidate.trim().length > 0,
);
const originatingChannel = hasDeliverableRoute
? routeChannelCandidate
: INTERNAL_MESSAGE_CHANNEL;
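
Note: the Boolean(...) wrapper matters because && evaluates to one of its operands, not to true/false, so the previous hasDeliverableRoute could end up holding undefined or "" once non-boolean operands like routeChannelCandidate joined the chain. A reduced illustration:

const routeChannelCandidate = undefined as string | undefined;
const routeToCandidate = "user:123" as string | undefined;

// Without normalization the chain short-circuits to its first falsy operand.
const unnormalized = routeChannelCandidate && typeof routeToCandidate === "string";
console.log(unnormalized); // undefined, not false

// Boolean(...) collapses the chain to a true/false value.
const normalized = Boolean(routeChannelCandidate && typeof routeToCandidate === "string");
console.log(normalized); // false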