// Change log for this test area — fix: harden SecretRef custom/provider secret persistence (#42554) (thanks @joshavant)
// * Models: gate custom provider keys by usable secret semantics
// * Config: project runtime writes onto source snapshot
// * Models: prevent stale apiKey preservation for marker-managed providers
// * Runner: strip SecretRef marker headers from resolved models
// * Secrets: scan active agent models.json path in audit
// * Config: guard runtime-source projection for unrelated configs
// * Extensions: fix onboarding type errors in CI
// * Tests: align setup helper account-enabled expectation
// * Secrets audit: harden models.json file reads
import { describe, expect, it } from "vitest";
|
|
import {
|
|
mergeProviderModels,
|
|
mergeProviders,
|
|
mergeWithExistingProviderSecrets,
|
|
type ExistingProviderConfig,
|
|
} from "./models-config.merge.js";
|
|
import type { ProviderConfig } from "./models-config.providers.js";
|
|
|
|
describe("models-config merge helpers", () => {
|
|
const preservedApiKey = "AGENT_KEY"; // pragma: allowlist secret
|
|
|
|
it("refreshes implicit model metadata while preserving explicit reasoning overrides", () => {
|
|
const merged = mergeProviderModels(
|
|
{
|
|
api: "openai-responses",
|
|
models: [
|
|
{
|
|
id: "gpt-5.4",
|
|
name: "GPT-5.4",
|
|
input: ["text"],
|
|
reasoning: true,
|
|
contextWindow: 1_000_000,
|
|
maxTokens: 100_000,
|
|
},
|
|
],
|
|
} as ProviderConfig,
|
|
{
|
|
api: "openai-responses",
|
|
models: [
|
|
{
|
|
id: "gpt-5.4",
|
|
name: "GPT-5.4",
|
|
input: ["image"],
|
|
reasoning: false,
|
|
contextWindow: 2_000_000,
|
|
maxTokens: 200_000,
|
|
},
|
|
],
|
|
} as ProviderConfig,
|
|
);
|
|
|
|
expect(merged.models).toEqual([
|
|
expect.objectContaining({
|
|
id: "gpt-5.4",
|
|
input: ["text"],
|
|
reasoning: false,
|
|
contextWindow: 2_000_000,
|
|
maxTokens: 200_000,
|
|
}),
|
|
]);
|
|
});
|
|
|
|
it("merges explicit providers onto trimmed keys", () => {
|
|
const merged = mergeProviders({
|
|
explicit: {
|
|
" custom ": {
|
|
api: "openai-responses",
|
|
models: [] as ProviderConfig["models"],
|
|
} as ProviderConfig,
|
|
},
|
|
});
|
|
|
|
expect(merged).toEqual({
|
|
custom: expect.objectContaining({ api: "openai-responses" }),
|
|
});
|
|
});
|
|
|
|
it("replaces stale baseUrl when model api surface changes", () => {
|
|
const merged = mergeWithExistingProviderSecrets({
|
|
nextProviders: {
|
|
custom: {
|
|
baseUrl: "https://config.example/v1",
|
|
models: [{ id: "model", api: "openai-responses" }],
|
|
} as ProviderConfig,
|
|
},
|
|
existingProviders: {
|
|
custom: {
|
|
baseUrl: "https://agent.example/v1",
|
|
apiKey: preservedApiKey,
|
|
models: [{ id: "model", api: "openai-completions" }],
|
|
} as ExistingProviderConfig,
|
|
},
|
|
secretRefManagedProviders: new Set<string>(),
|
|
explicitBaseUrlProviders: new Set<string>(),
|
|
});
|
|
|
|
expect(merged.custom).toEqual(
|
|
expect.objectContaining({
|
|
apiKey: preservedApiKey,
|
|
baseUrl: "https://config.example/v1",
|
|
}),
|
|
);
|
|
});
|
|
|
|
it("does not preserve stale plaintext apiKey when next entry is a marker", () => {
|
|
const merged = mergeWithExistingProviderSecrets({
|
|
nextProviders: {
|
|
custom: {
|
|
apiKey: "OPENAI_API_KEY", // pragma: allowlist secret
|
|
models: [{ id: "model", api: "openai-responses" }],
|
|
} as ProviderConfig,
|
|
},
|
|
existingProviders: {
|
|
custom: {
|
|
apiKey: preservedApiKey,
|
|
models: [{ id: "model", api: "openai-responses" }],
|
|
} as ExistingProviderConfig,
|
|
},
|
|
secretRefManagedProviders: new Set<string>(),
|
|
explicitBaseUrlProviders: new Set<string>(),
|
|
});
|
|
|
|
expect(merged.custom?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
|
});
|
|
});
|