Compare commits: main...vincentkoc (8 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | b8fe23a55f |  |
|  | f93fe9aa5e |  |
|  | 7f59a15442 |  |
|  | 5e381dc71f |  |
|  | 6c2642c9b2 |  |
|  | 8fb8d08eaf |  |
|  | 0f4950dad6 |  |
|  | 22043197ba |  |
`lookupContextTokens` tests:

```diff
@@ -34,6 +34,38 @@ describe("lookupContextTokens", () => {
     expect(lookupContextTokens("openrouter/claude-sonnet")).toBe(321_000);
   });
 
+  it("adds GPT-5.4 context windows from forward-compat discovery fallbacks", async () => {
+    vi.doMock("../config/config.js", () => ({
+      loadConfig: () => ({
+        models: { providers: {} },
+      }),
+    }));
+    vi.doMock("./models-config.js", () => ({
+      ensureOpenClawModelsJson: vi.fn(async () => {}),
+    }));
+    vi.doMock("./agent-paths.js", () => ({
+      resolveOpenClawAgentDir: () => "/tmp/openclaw-agent",
+    }));
+    vi.doMock("./pi-model-discovery.js", () => ({
+      discoverAuthStorage: vi.fn(() => ({})),
+      discoverModels: vi.fn(() => ({
+        getAll: () => [
+          {
+            provider: "openai",
+            id: "gpt-5.2",
+            contextWindow: 400_000,
+          },
+        ],
+      })),
+    }));
+
+    const { lookupContextTokens } = await import("./context.js");
+    await vi.waitFor(() => {
+      expect(lookupContextTokens("gpt-5.4")).toBe(1_050_000);
+      expect(lookupContextTokens("gpt-5.4-pro")).toBe(1_050_000);
+    });
+  });
+
   it("does not skip eager warmup when --profile is followed by -- terminator", async () => {
     const loadConfigMock = vi.fn(() => ({ models: {} }));
     vi.doMock("../config/config.js", () => ({
```
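The new test relies on vitest's mock-then-import ordering: `vi.doMock` only affects modules imported after the mock is registered, and the context-window cache fills asynchronously, which is why the assertions sit inside `vi.waitFor`. A minimal skeleton of that pattern, with the mocked paths taken from the diff above (the full test also mocks config and agent paths; everything else here is illustrative):

```ts
import { expect, it, vi } from "vitest";

it("skeleton: register mocks, then import, then poll", async () => {
  // Mocks must be registered before ./context.js is loaded.
  vi.doMock("./pi-model-discovery.js", () => ({
    discoverAuthStorage: vi.fn(() => ({})),
    discoverModels: vi.fn(() => ({
      getAll: () => [{ provider: "openai", id: "gpt-5.2", contextWindow: 400_000 }],
    })),
  }));

  // Dynamic import so the module under test picks up the mocks above.
  const { lookupContextTokens } = await import("./context.js");

  // The cache loads lazily, so poll until the lookup resolves.
  await vi.waitFor(() => {
    expect(lookupContextTokens("gpt-5.4")).toBe(1_050_000);
  });
});
```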
Context-window inference module:

```diff
@@ -1,17 +1,19 @@
 // Lazy-load pi-coding-agent model metadata so we can infer context windows when
 // the agent reports a model id. This includes custom models.json entries.
 
+import type { Api, Model } from "@mariozechner/pi-ai";
 import { loadConfig } from "../config/config.js";
 import type { OpenClawConfig } from "../config/config.js";
 import { computeBackoff, type BackoffPolicy } from "../infra/backoff.js";
 import { consumeRootOptionToken, FLAG_TERMINATOR } from "../infra/cli-root-options.js";
 import { resolveOpenClawAgentDir } from "./agent-paths.js";
+import { augmentKnownForwardCompatModels } from "./model-forward-compat.js";
 import { ensureOpenClawModelsJson } from "./models-config.js";
 
-type ModelEntry = { id: string; contextWindow?: number };
+type ModelEntry = { id: string; provider?: string; contextWindow?: number };
 type ModelRegistryLike = {
-  getAvailable?: () => ModelEntry[];
-  getAll: () => ModelEntry[];
+  getAvailable?: () => Model<Api>[];
+  getAll: () => Model<Api>[];
 };
 type ConfigModelEntry = { id?: string; contextWindow?: number };
 type ProviderConfigEntry = { models?: ConfigModelEntry[] };
@@ -156,10 +158,11 @@ function ensureContextWindowCacheLoaded(): Promise<void> {
   const agentDir = resolveOpenClawAgentDir();
   const authStorage = discoverAuthStorage(agentDir);
   const modelRegistry = discoverModels(authStorage, agentDir) as unknown as ModelRegistryLike;
-  const models =
+  const models = augmentKnownForwardCompatModels(
     typeof modelRegistry.getAvailable === "function"
       ? modelRegistry.getAvailable()
-      : modelRegistry.getAll();
+      : modelRegistry.getAll(),
+  );
   applyDiscoveredContextWindows({
     cache: MODEL_CACHE,
     models,
```
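The change above threads discovered models through `augmentKnownForwardCompatModels` before they reach the cache. A condensed sketch of that flow, using the registry shape from the diff (the helper name `collectModels` is ours, for illustration only):

```ts
import type { Api, Model } from "@mariozechner/pi-ai";
import { augmentKnownForwardCompatModels } from "./model-forward-compat.js";

type ModelRegistryLike = {
  getAvailable?: () => Model<Api>[];
  getAll: () => Model<Api>[];
};

function collectModels(registry: ModelRegistryLike): Model<Api>[] {
  // Prefer the narrower "available" view when the registry exposes one,
  // then let the forward-compat pass append known-but-undiscovered IDs.
  return augmentKnownForwardCompatModels(
    typeof registry.getAvailable === "function" ? registry.getAvailable() : registry.getAll(),
  );
}
```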
`src/agents/live-model-filter.test.ts` (new file, 13 lines):

```diff
@@ -0,0 +1,13 @@
+import { describe, expect, it } from "vitest";
+import { isModernModelRef } from "./live-model-filter.js";
+
+describe("isModernModelRef", () => {
+  it("accepts new openai gpt-5.4 refs", () => {
+    expect(isModernModelRef({ provider: "openai", id: "gpt-5.4" })).toBe(true);
+    expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-pro" })).toBe(true);
+  });
+
+  it("keeps rejecting older openai refs outside the allowlist", () => {
+    expect(isModernModelRef({ provider: "openai", id: "gpt-4.1" })).toBe(false);
+  });
+});
```
`isModernModelRef` filter:

```diff
@@ -46,6 +46,8 @@ export function isModernModelRef(ref: ModelRef): boolean {
   }
 
   if (provider === "openai") {
+    // Keep the broader prefix match for GPT-5.x families so live tests keep opting into
+    // fresh OpenAI minor variants before the forward-compat catalog learns each exact ID.
     return matchesExactOrPrefix(id, OPENAI_MODELS);
   }
 
```
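`matchesExactOrPrefix` and `OPENAI_MODELS` are not shown in this compare, so the following is only a plausible reading of the comment's intent, not the real implementation: an allowlist entry matches either exactly or as a dash-delimited prefix, which is how `gpt-5.4-pro` would ride on a `gpt-5.4` entry while `gpt-4.1` stays rejected.

```ts
// Assumed allowlist contents and matcher semantics -- illustration only.
const OPENAI_MODELS = ["gpt-5.2", "gpt-5.3", "gpt-5.4"] as const;

function matchesExactOrPrefix(id: string, allow: readonly string[]): boolean {
  const normalized = id.trim().toLowerCase();
  return allow.some((entry) => normalized === entry || normalized.startsWith(`${entry}-`));
}

matchesExactOrPrefix("gpt-5.4-pro", OPENAI_MODELS); // true, via the "gpt-5.4" prefix
matchesExactOrPrefix("gpt-4.1", OPENAI_MODELS); // false, outside the allowlist
```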
`resolveForwardCompatModel` tests:

```diff
@@ -307,7 +307,7 @@ describe("resolveForwardCompatModel", () => {
     expect(model?.reasoning).toBe(true);
     expect(model?.contextWindow).toBe(1_050_000);
     expect(model?.maxTokens).toBe(128_000);
-    expect(model?.cost).toEqual({ input: 0, output: 0, cacheRead: 0, cacheWrite: 0 });
+    expect(model?.cost).toEqual({ input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 });
   });
 
   it("resolves openai gpt-5.4-pro via template fallback", () => {
@@ -320,6 +320,7 @@ describe("resolveForwardCompatModel", () => {
     expect(model?.baseUrl).toBe("https://api.openai.com/v1");
     expect(model?.contextWindow).toBe(1_050_000);
     expect(model?.maxTokens).toBe(128_000);
+    expect(model?.cost).toEqual({ input: 30, output: 180, cacheRead: 0, cacheWrite: 0 });
   });
 
   it("resolves openai-codex gpt-5.4 via codex template fallback", () => {
```
Forward-compat model constants:

```diff
@@ -10,10 +10,14 @@ const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_GPT_54_MAX_TOKENS = 128_000;
 const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
 const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
+const OPENAI_GPT_54_COST = { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 } as const;
+// OpenAI currently publishes no cached-input price for GPT-5.4 Pro.
+const OPENAI_GPT_54_PRO_COST = { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 } as const;
 
 const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
 const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
 const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
+const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
 const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
 
 const ANTHROPIC_OPUS_46_MODEL_ID = "claude-opus-4-6";
```
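Assuming the cost fields follow the usual USD-per-million-tokens convention for this kind of catalog (the diff does not state the unit), a rough per-request estimate from these constants looks like:

```ts
const OPENAI_GPT_54_COST = { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 } as const;

type Cost = typeof OPENAI_GPT_54_COST;

// Charge cached input at the cacheRead rate and the rest at the input rate.
function estimateUsd(cost: Cost, inputTokens: number, outputTokens: number, cachedTokens = 0): number {
  return (
    ((inputTokens - cachedTokens) * cost.input +
      cachedTokens * cost.cacheRead +
      outputTokens * cost.output) /
    1_000_000
  );
}

// 100k fresh input tokens + 10k output tokens: 0.25 + 0.15 = $0.40.
estimateUsd(OPENAI_GPT_54_COST, 100_000, 10_000);
```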
`resolveOpenAIGpt54ForwardCompatModel`:

```diff
@@ -55,35 +59,23 @@ function resolveOpenAIGpt54ForwardCompatModel(
     return undefined;
   }
 
-  return (
-    cloneFirstTemplateModel({
-      normalizedProvider,
-      trimmedModelId,
-      templateIds: [...templateIds],
-      modelRegistry,
-      patch: {
-        api: "openai-responses",
-        provider: normalizedProvider,
-        baseUrl: "https://api.openai.com/v1",
-        reasoning: true,
-        input: ["text", "image"],
-        contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
-        maxTokens: OPENAI_GPT_54_MAX_TOKENS,
-      },
-    }) ??
-    normalizeModelCompat({
-      id: trimmedModelId,
-      name: trimmedModelId,
-      api: "openai-responses",
-      provider: normalizedProvider,
-      baseUrl: "https://api.openai.com/v1",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-      contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
-      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
-    } as Model<Api>)
-  );
+  const template = cloneFirstTemplateModel({
+    normalizedProvider,
+    trimmedModelId,
+    templateIds: [...templateIds],
+    modelRegistry,
+    patch: {
+      api: "openai-responses",
+      provider: normalizedProvider,
+      baseUrl: "https://api.openai.com/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
+      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+    },
+  });
 
+  return buildOpenAIGpt54FallbackModel(trimmedModelId, template);
 }
 
 function cloneFirstTemplateModel(params: {
```
New forward-compat helpers and `augmentKnownForwardCompatModels`:

```diff
@@ -109,6 +101,179 @@ function cloneFirstTemplateModel(params: {
   return undefined;
 }
 
+function cloneSyntheticTemplateModel(params: {
+  models: Model<Api>[];
+  normalizedProvider: string;
+  trimmedModelId: string;
+  templateIds: readonly string[];
+  patch?: Partial<Model<Api>>;
+}): Model<Api> | undefined {
+  const { models, normalizedProvider, trimmedModelId, templateIds, patch } = params;
+  for (const templateId of [...new Set(templateIds)].filter(Boolean)) {
+    const template =
+      models.find(
+        (model) =>
+          normalizeProviderId(model.provider) === normalizedProvider &&
+          model.id.trim().toLowerCase() === templateId.toLowerCase(),
+      ) ?? null;
+    if (!template) {
+      continue;
+    }
+    return normalizeModelCompat({
+      ...template,
+      id: trimmedModelId,
+      name: trimmedModelId,
+      ...patch,
+    } as Model<Api>);
+  }
+  return undefined;
+}
+
+function buildOpenAIGpt54FallbackModel(modelId: string, template?: Model<Api> | null): Model<Api> {
+  return normalizeModelCompat({
+    ...template,
+    id: modelId,
+    name: modelId,
+    api: "openai-responses",
+    provider: "openai",
+    baseUrl: "https://api.openai.com/v1",
+    reasoning: true,
+    input: ["text", "image"],
+    cost:
+      modelId.toLowerCase() === OPENAI_GPT_54_PRO_MODEL_ID
+        ? OPENAI_GPT_54_PRO_COST
+        : OPENAI_GPT_54_COST,
+    contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
+    maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+  } as Model<Api>);
+}
+
+function buildOpenAICodexSparkFallbackModel(template?: Model<Api> | null): Model<Api> {
+  return normalizeModelCompat({
+    ...template,
+    id: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
+    name: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
+    api: "openai-codex-responses",
+    provider: "openai-codex",
+    baseUrl: "https://chatgpt.com/backend-api",
+    reasoning: true,
+    input: ["text", "image"],
+    cost: template?.cost ?? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+    contextWindow: template?.contextWindow ?? DEFAULT_CONTEXT_TOKENS,
+    maxTokens: template?.maxTokens ?? DEFAULT_CONTEXT_TOKENS,
+  } as Model<Api>);
+}
+
+export function augmentKnownForwardCompatModels(models: Model<Api>[]): Model<Api>[] {
+  const next = [...models];
+  const existing = new Set(
+    next.map((model) => `${normalizeProviderId(model.provider)}::${model.id.trim().toLowerCase()}`),
+  );
+  const hasProvider = (provider: string) =>
+    next.some((model) => normalizeProviderId(model.provider) === provider);
+  const pushIfMissing = (provider: string, id: string, model: Model<Api> | undefined) => {
+    const key = `${normalizeProviderId(provider)}::${id.trim().toLowerCase()}`;
+    if (existing.has(key) || !model) {
+      return;
+    }
+    next.push(model);
+    existing.add(key);
+  };
+
+  if (hasProvider("openai")) {
+    pushIfMissing(
+      "openai",
+      OPENAI_GPT_54_MODEL_ID,
+      buildOpenAIGpt54FallbackModel(
+        OPENAI_GPT_54_MODEL_ID,
+        cloneSyntheticTemplateModel({
+          models: next,
+          normalizedProvider: "openai",
+          trimmedModelId: OPENAI_GPT_54_MODEL_ID,
+          templateIds: OPENAI_GPT_54_TEMPLATE_MODEL_IDS,
+          patch: {
+            api: "openai-responses",
+            provider: "openai",
+            baseUrl: "https://api.openai.com/v1",
+            reasoning: true,
+            input: ["text", "image"],
+            contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
+            maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+          },
+        }),
+      ),
+    );
+    pushIfMissing(
+      "openai",
+      OPENAI_GPT_54_PRO_MODEL_ID,
+      buildOpenAIGpt54FallbackModel(
+        OPENAI_GPT_54_PRO_MODEL_ID,
+        cloneSyntheticTemplateModel({
+          models: next,
+          normalizedProvider: "openai",
+          trimmedModelId: OPENAI_GPT_54_PRO_MODEL_ID,
+          templateIds: OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS,
+          patch: {
+            api: "openai-responses",
+            provider: "openai",
+            baseUrl: "https://api.openai.com/v1",
+            reasoning: true,
+            input: ["text", "image"],
+            contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
+            maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+          },
+        }),
+      ),
+    );
+  }
+
+  if (hasProvider("openai-codex")) {
+    pushIfMissing(
+      "openai-codex",
+      OPENAI_CODEX_GPT_54_MODEL_ID,
+      cloneSyntheticTemplateModel({
+        models: next,
+        normalizedProvider: "openai-codex",
+        trimmedModelId: OPENAI_CODEX_GPT_54_MODEL_ID,
+        templateIds: OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
+      }) ??
+        normalizeModelCompat({
+          id: OPENAI_CODEX_GPT_54_MODEL_ID,
+          name: OPENAI_CODEX_GPT_54_MODEL_ID,
+          api: "openai-codex-responses",
+          provider: "openai-codex",
+          baseUrl: "https://chatgpt.com/backend-api",
+          reasoning: true,
+          input: ["text", "image"],
+          cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+          contextWindow: DEFAULT_CONTEXT_TOKENS,
+          maxTokens: DEFAULT_CONTEXT_TOKENS,
+        } as Model<Api>),
+    );
+    pushIfMissing(
+      "openai-codex",
+      OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
+      buildOpenAICodexSparkFallbackModel(
+        cloneSyntheticTemplateModel({
+          models: next,
+          normalizedProvider: "openai-codex",
+          trimmedModelId: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
+          templateIds: [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS],
+          patch: {
+            api: "openai-codex-responses",
+            provider: "openai-codex",
+            baseUrl: "https://chatgpt.com/backend-api",
+            reasoning: true,
+            input: ["text", "image"],
+          },
+        }),
+      ),
+    );
+  }
+
+  return next;
+}
+
 const CODEX_GPT54_ELIGIBLE_PROVIDERS = new Set(["openai-codex"]);
 const CODEX_GPT53_ELIGIBLE_PROVIDERS = new Set(["openai-codex", "github-copilot"]);
 
```
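The tests later in this compare pin down the observable behavior of `augmentKnownForwardCompatModels`: given a catalog that already contains an OpenAI model, the pass appends `gpt-5.4` and `gpt-5.4-pro` clones but never duplicates an ID the registry already has. A behavior sketch with illustrative input (not real discovery output):

```ts
import type { Api, Model } from "@mariozechner/pi-ai";
import { augmentKnownForwardCompatModels } from "./model-forward-compat.js";

// Illustrative discovered catalog: one real OpenAI entry.
const discovered = [
  { provider: "openai", id: "gpt-5.2", contextWindow: 400_000 },
] as unknown as Model<Api>[];

const augmented = augmentKnownForwardCompatModels(discovered);
augmented.map((m) => m.id); // ["gpt-5.2", "gpt-5.4", "gpt-5.4-pro"]

// Idempotent for these IDs: pushIfMissing keys on `${provider}::${id}`
// and skips anything already present.
augmentKnownForwardCompatModels(augmented).length === augmented.length; // true
```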
`resolveModel` tests:

```diff
@@ -251,7 +251,6 @@ describe("resolveModel", () => {
     expect(result.model?.contextWindow).toBe(262144);
     expect(result.model?.maxTokens).toBe(32768);
   });
-
   it("propagates reasoning from matching configured fallback model", () => {
     const cfg = {
       models: {
@@ -446,6 +445,30 @@ describe("resolveModel", () => {
     });
   });
 
+  it("uses GPT-5.4 Pro pricing when cloning an older openai template", () => {
+    mockDiscoveredModel({
+      provider: "openai",
+      modelId: "gpt-5.2",
+      templateModel: buildForwardCompatTemplate({
+        id: "gpt-5.2",
+        name: "GPT-5.2",
+        provider: "openai",
+        api: "openai-responses",
+        baseUrl: "https://api.openai.com/v1",
+      }),
+    });
+
+    const result = resolveModel("openai", "gpt-5.4-pro", "/tmp/agent");
+
+    expect(result.error).toBeUndefined();
+    expect(result.model?.cost).toEqual({
+      input: 30,
+      output: 180,
+      cacheRead: 0,
+      cacheWrite: 0,
+    });
+  });
+
   it("builds an anthropic forward-compat fallback for claude-opus-4-6", () => {
     mockDiscoveredModel({
       provider: "anthropic",
```
`listThinkingLevels` tests:

```diff
@@ -42,6 +42,11 @@ describe("normalizeThinkLevel", () => {
 });
 
 describe("listThinkingLevels", () => {
+  it("includes xhigh for openai gpt-5.4 refs", () => {
+    expect(listThinkingLevels("openai", "gpt-5.4")).toContain("xhigh");
+    expect(listThinkingLevels("openai", "gpt-5.4-pro")).toContain("xhigh");
+  });
+
   it("includes xhigh for codex models", () => {
     expect(listThinkingLevels(undefined, "gpt-5.2-codex")).toContain("xhigh");
     expect(listThinkingLevels(undefined, "gpt-5.3-codex")).toContain("xhigh");
```
`models list/status` tests:

```diff
@@ -264,6 +264,42 @@ describe("models list/status", () => {
     expect(payload.models[0]?.available).toBe(false);
   });
 
+  it("loadModelRegistry augments raw OpenAI models with GPT-5.4 fallbacks", async () => {
+    modelRegistryState.models = [
+      {
+        provider: "openai",
+        id: "gpt-5.2",
+        name: "GPT-5.2",
+        api: "openai-responses",
+        input: ["text", "image"],
+        baseUrl: "https://api.openai.com/v1",
+        contextWindow: 400_000,
+        maxTokens: 128_000,
+        reasoning: true,
+        cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
+      },
+    ];
+    modelRegistryState.available = modelRegistryState.models;
+
+    const { models } = await loadModelRegistry({});
+    expect(models).toEqual(
+      expect.arrayContaining([
+        expect.objectContaining({
+          provider: "openai",
+          id: "gpt-5.4",
+          api: "openai-responses",
+          contextWindow: 1_050_000,
+        }),
+        expect.objectContaining({
+          provider: "openai",
+          id: "gpt-5.4-pro",
+          api: "openai-responses",
+          contextWindow: 1_050_000,
+        }),
+      ]),
+    );
+  });
+
   it("models list does not treat availability-unavailable code as discovery fallback", async () => {
     configureGoogleAntigravityModel("claude-opus-4-6-thinking");
     modelRegistryState.getAllError = Object.assign(new Error("model discovery failed"), {
```
`loadModelRegistry` (models CLI source):

```diff
@@ -8,6 +8,7 @@ import {
   resolveAwsSdkEnvVarName,
   resolveEnvApiKey,
 } from "../../agents/model-auth.js";
+import { augmentKnownForwardCompatModels } from "../../agents/model-forward-compat.js";
 import { ensureOpenClawModelsJson } from "../../agents/models-config.js";
 import { discoverAuthStorage, discoverModels } from "../../agents/pi-model-discovery.js";
 import type { OpenClawConfig } from "../../config/config.js";
@@ -99,7 +100,7 @@ export async function loadModelRegistry(cfg: OpenClawConfig) {
   const agentDir = resolveOpenClawAgentDir();
   const authStorage = discoverAuthStorage(agentDir);
   const registry = discoverModels(authStorage, agentDir);
-  const models = registry.getAll();
+  const models = augmentKnownForwardCompatModels(registry.getAll());
   let availableKeys: Set<string> | undefined;
   let availabilityErrorMessage: string | undefined;
 
```