openclaw/src/agents/pi-embedded-runner/model.test-harness.ts
Rudi Cilibrasi d0641c2e9e Fix stale-metadata tests to exercise the preferResolvedModel/preserveDiscoveredTransportMetadata code path
The stale-metadata tests called mockOpenAICodexTemplateModel() then immediately
overwrote the discovery mock with one that only returned gpt-5.4, causing
hasDynamicOverrideTemplate to return false and the override path to be skipped.

Introduce mockStaleCodexDiscovery() helper that preserves the gpt-5.2-codex
template in the registry alongside the stale gpt-5.4 model, so the intended
code path is now exercised by all 8 stale-metadata tests.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-20 22:51:27 -07:00

146 lines
4.2 KiB
TypeScript

import { vi } from "vitest";
import type { ModelDefinitionConfig } from "../../config/types.js";
import { discoverModels } from "../pi-model-discovery.js";
/**
 * Build a minimal, zero-cost model definition for tests.
 *
 * Every descriptive field is derived from `id`; numeric fields are the
 * smallest non-trivial values so assertions stay simple.
 */
export const makeModel = (id: string): ModelDefinitionConfig => {
  return {
    id,
    name: id,
    reasoning: false,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 1,
    maxTokens: 1,
  };
};
// Canonical gpt-5.2-codex template used as the discovered-model fixture in
// the OpenAI Codex tests; other helpers in this file register it with the
// mocked discovery registry or reuse its `cost` for expectations.
export const OPENAI_CODEX_TEMPLATE_MODEL = {
id: "gpt-5.2-codex",
name: "GPT-5.2 Codex",
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
// `as const` keeps the modality list as a readonly literal tuple.
input: ["text", "image"] as const,
cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
contextWindow: 272000,
maxTokens: 128000,
};
// Thin convenience wrapper: register a single (provider, modelId) pair with
// the mocked discovery registry.
function mockTemplateModel(provider: string, modelId: string, templateModel: unknown): void {
  mockDiscoveredModel({ provider, modelId, templateModel });
}
export function mockOpenAICodexTemplateModel(): void {
mockTemplateModel("openai-codex", "gpt-5.2-codex", OPENAI_CODEX_TEMPLATE_MODEL);
}
/**
 * Build the expected model definition for a forward-compatible Codex id.
 *
 * Spark is a text-only, zero-cost variant with a smaller context window;
 * gpt-5.4 gets an enlarged context window; everything else mirrors the
 * gpt-5.2-codex template's transport metadata.
 */
export function buildOpenAICodexForwardCompatExpectation(
  id: string = "gpt-5.3-codex",
): Partial<ModelDefinitionConfig> & {
  provider: string;
  id: string;
  api: string;
  baseUrl: string;
} {
  const spark = id === "gpt-5.3-codex-spark";
  let contextWindow = 272000;
  if (id === "gpt-5.4") {
    contextWindow = 1_050_000;
  } else if (spark) {
    contextWindow = 128_000;
  }
  const cost = spark
    ? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }
    : OPENAI_CODEX_TEMPLATE_MODEL.cost;
  const input = spark ? ["text"] : ["text", "image"];
  return {
    provider: "openai-codex",
    id,
    api: "openai-codex-responses",
    baseUrl: "https://chatgpt.com/backend-api",
    reasoning: true,
    input,
    cost,
    contextWindow,
    maxTokens: 128000,
  };
}
// Gemini 3 Pro (Cloud Code Assist) fixture registered by
// mockGoogleGeminiCliProTemplateModel() with the mocked discovery registry.
export const GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL = {
id: "gemini-3-pro-preview",
name: "Gemini 3 Pro Preview (Cloud Code Assist)",
provider: "google-gemini-cli",
api: "google-gemini-cli",
baseUrl: "https://cloudcode-pa.googleapis.com",
reasoning: true,
// `as const` keeps the modality list as a readonly literal tuple.
input: ["text", "image"] as const,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 64000,
};
// Gemini 3 Flash (Cloud Code Assist) fixture registered by
// mockGoogleGeminiCliFlashTemplateModel(); differs from the Pro fixture
// only in id/name and `reasoning: false`.
export const GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL = {
id: "gemini-3-flash-preview",
name: "Gemini 3 Flash Preview (Cloud Code Assist)",
provider: "google-gemini-cli",
api: "google-gemini-cli",
baseUrl: "https://cloudcode-pa.googleapis.com",
reasoning: false,
// `as const` keeps the modality list as a readonly literal tuple.
input: ["text", "image"] as const,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 64000,
};
export function mockGoogleGeminiCliProTemplateModel(): void {
mockTemplateModel(
"google-gemini-cli",
"gemini-3-pro-preview",
GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL,
);
}
export function mockGoogleGeminiCliFlashTemplateModel(): void {
mockTemplateModel(
"google-gemini-cli",
"gemini-3-flash-preview",
GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL,
);
}
// Point the mocked discoverModels at an empty registry whose `find` always
// misses, so no template model is visible to the code under test.
export function resetMockDiscoverModels(): void {
  const emptyRegistry = {
    find: vi.fn(() => null),
  } as unknown as ReturnType<typeof discoverModels>;
  vi.mocked(discoverModels).mockReturnValue(emptyRegistry);
}
/**
 * Point the mocked discoverModels at a registry containing exactly one
 * (provider, modelId) → templateModel entry; every other lookup misses.
 */
export function mockDiscoveredModel(params: {
  provider: string;
  modelId: string;
  templateModel: unknown;
}): void {
  const { provider, modelId, templateModel } = params;
  const registry = {
    find: vi.fn((candidateProvider: string, candidateId: string) =>
      candidateProvider === provider && candidateId === modelId ? templateModel : null,
    ),
  } as unknown as ReturnType<typeof discoverModels>;
  vi.mocked(discoverModels).mockReturnValue(registry);
}
/**
 * Register a stale discovered gpt-5.4 model alongside the gpt-5.2-codex
 * template, so `hasDynamicOverrideTemplate` stays true and the
 * `preferResolvedModel`/`preserveDiscoveredTransportMetadata` override
 * path is exercised rather than skipped.
 */
export function mockStaleCodexDiscovery(staleModel: Record<string, unknown>): void {
  const registry = {
    find: vi.fn((provider: string, modelId: string) => {
      if (provider !== "openai-codex") {
        return null;
      }
      if (modelId === "gpt-5.2-codex") {
        return OPENAI_CODEX_TEMPLATE_MODEL;
      }
      return modelId === "gpt-5.4" ? staleModel : null;
    }),
  } as unknown as ReturnType<typeof discoverModels>;
  vi.mocked(discoverModels).mockReturnValue(registry);
}