From d1d46c6cfb2b9b8a21493dad3d46a1e1ed02131a Mon Sep 17 00:00:00 2001
From: Vincent Koc
Date: Fri, 20 Mar 2026 15:15:15 -0700
Subject: [PATCH] test(openai): broaden live model coverage

---
 extensions/openai/openai-provider.test.ts | 168 +++++++++++++++-------
 1 file changed, 117 insertions(+), 51 deletions(-)

diff --git a/extensions/openai/openai-provider.test.ts b/extensions/openai/openai-provider.test.ts
index 4535d3a7cc2..52182c2b44a 100644
--- a/extensions/openai/openai-provider.test.ts
+++ b/extensions/openai/openai-provider.test.ts
@@ -3,9 +3,71 @@
 import { describe, expect, it } from "vitest";
 import { buildOpenAIProvider } from "./openai-provider.js";
 const OPENAI_API_KEY = process.env.OPENAI_API_KEY ?? "";
+const DEFAULT_LIVE_MODEL_IDS = ["gpt-5.4-mini", "gpt-5.4-nano"] as const;
 const liveEnabled = OPENAI_API_KEY.trim().length > 0 && process.env.OPENCLAW_LIVE_TEST === "1";
 const describeLive = liveEnabled ? describe : describe.skip;
 
+type LiveModelCase = {
+  modelId: string;
+  templateId: string;
+  templateName: string;
+  cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
+  contextWindow: number;
+  maxTokens: number;
+};
+
+function resolveLiveModelCase(modelId: string): LiveModelCase {
+  switch (modelId) {
+    case "gpt-5.4":
+      return {
+        modelId,
+        templateId: "gpt-5.2",
+        templateName: "GPT-5.2",
+        cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 400_000,
+        maxTokens: 128_000,
+      };
+    case "gpt-5.4-pro":
+      return {
+        modelId,
+        templateId: "gpt-5.2-pro",
+        templateName: "GPT-5.2 Pro",
+        cost: { input: 15, output: 60, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 400_000,
+        maxTokens: 128_000,
+      };
+    case "gpt-5.4-mini":
+      return {
+        modelId,
+        templateId: "gpt-5-mini",
+        templateName: "GPT-5 mini",
+        cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 400_000,
+        maxTokens: 128_000,
+      };
+    case "gpt-5.4-nano":
+      return {
+        modelId,
+        templateId: "gpt-5-nano",
+        templateName: "GPT-5 nano",
+        cost: { input: 0.5, output: 1, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 200_000,
+        maxTokens: 64_000,
+      };
+    default:
+      throw new Error(`Unsupported live OpenAI model: ${modelId}`);
+  }
+}
+
+function resolveLiveModelCases(raw?: string): LiveModelCase[] {
+  const requested = raw
+    ?.split(",")
+    .map((value) => value.trim())
+    .filter(Boolean);
+  const modelIds = requested?.length ? requested : [...DEFAULT_LIVE_MODEL_IDS];
+  return [...new Set(modelIds)].map((modelId) => resolveLiveModelCase(modelId));
+}
+
 describe("buildOpenAIProvider", () => {
   it("resolves gpt-5.4 mini and nano from GPT-5 small-model templates", () => {
     const provider = buildOpenAIProvider();
@@ -113,63 +175,67 @@ describe("buildOpenAIProvider", () => {
 });
 
 describeLive("buildOpenAIProvider live", () => {
-  it("resolves a live model and completes through the OpenAI responses API", async () => {
-    const provider = buildOpenAIProvider();
-    const registry = {
-      find(providerId: string, id: string) {
-        if (providerId !== "openai") {
+  it.each(resolveLiveModelCases(process.env.OPENCLAW_LIVE_OPENAI_MODELS))(
+    "resolves $modelId and completes through the OpenAI responses API",
+    async (liveCase) => {
+      const provider = buildOpenAIProvider();
+      const registry = {
+        find(providerId: string, id: string) {
+          if (providerId !== "openai") {
+            return null;
+          }
+          if (id === liveCase.templateId) {
+            return {
+              id: liveCase.templateId,
+              name: liveCase.templateName,
+              provider: "openai",
+              api: "openai-completions",
+              baseUrl: "https://api.openai.com/v1",
+              reasoning: true,
+              input: ["text", "image"],
+              cost: liveCase.cost,
+              contextWindow: liveCase.contextWindow,
+              maxTokens: liveCase.maxTokens,
+            };
+          }
           return null;
-        }
-        if (id === "gpt-5-nano") {
-          return {
-            id,
-            name: "GPT-5 nano",
-            provider: "openai",
-            api: "openai-completions",
-            baseUrl: "https://api.openai.com/v1",
-            reasoning: true,
-            input: ["text", "image"],
-            cost: { input: 0.5, output: 1, cacheRead: 0, cacheWrite: 0 },
-            contextWindow: 200_000,
-            maxTokens: 64_000,
-          };
-        }
-        return null;
-      },
-    };
+        },
+      };
 
-    const resolved = provider.resolveDynamicModel?.({
-      provider: "openai",
-      modelId: "gpt-5.4-nano",
-      modelRegistry: registry as never,
-    });
+      const resolved = provider.resolveDynamicModel?.({
+        provider: "openai",
+        modelId: liveCase.modelId,
+        modelRegistry: registry as never,
+      });
 
-    expect(resolved).toBeDefined();
+      expect(resolved).toBeDefined();
 
-    const normalized = provider.normalizeResolvedModel?.({
-      provider: "openai",
-      modelId: resolved!.id,
-      model: resolved!,
-    });
+      const normalized = provider.normalizeResolvedModel?.({
+        provider: "openai",
+        modelId: resolved!.id,
+        model: resolved!,
+      });
 
-    expect(normalized).toMatchObject({
-      provider: "openai",
-      id: "gpt-5.4-nano",
-      api: "openai-responses",
-      baseUrl: "https://api.openai.com/v1",
-    });
+      expect(normalized).toMatchObject({
+        provider: "openai",
+        id: liveCase.modelId,
+        api: "openai-responses",
+        baseUrl: "https://api.openai.com/v1",
+      });
 
-    const client = new OpenAI({
-      apiKey: OPENAI_API_KEY,
-      baseURL: normalized?.baseUrl,
-    });
+      const client = new OpenAI({
+        apiKey: OPENAI_API_KEY,
+        baseURL: normalized?.baseUrl,
+      });
 
-    const response = await client.responses.create({
-      model: normalized?.id ?? "gpt-5.4-nano",
-      input: "Reply with exactly OK.",
-      max_output_tokens: 16,
-    });
+      const response = await client.responses.create({
+        model: normalized?.id ?? liveCase.modelId,
+        input: "Reply with exactly OK.",
+        max_output_tokens: 16,
+      });
 
-    expect(response.output_text.trim()).toBe("OK");
-  }, 30_000);
+      expect(response.output_text.trim()).toMatch(/^OK[.!]?$/);
+    },
+    30_000,
+  );
 });