refactor(plugins): move auth and model policy to providers

This commit is contained in:
Peter Steinberger 2026-03-15 21:45:01 -07:00
parent 3d8c29cc53
commit 7a6be3d531
No known key found for this signature in database
30 changed files with 506 additions and 513 deletions

View File

@ -20,7 +20,9 @@ For model selection rules, see [/concepts/models](/concepts/models).
OpenClaw merges that output into `models.providers` before writing
`models.json`.
- Provider manifests can declare `providerAuthEnvVars` so generic env-based
auth probes do not need to load plugin runtime.
auth probes do not need to load plugin runtime. The remaining core env-var
map now covers only core (non-plugin) providers, plus a few generic-precedence
cases such as Anthropic API-key-first onboarding.
- Provider plugins can also own provider runtime behavior via
`resolveDynamicModel`, `prepareDynamicModel`, `normalizeResolvedModel`,
`capabilities`, `prepareExtraParams`, `wrapStreamFn`,
@ -37,6 +39,10 @@ the generic inference loop.
Typical split:
- `auth[].run` / `auth[].runNonInteractive`: provider owns onboarding/login
flows for `openclaw onboard`, `openclaw models auth`, and headless setup
- `wizard.onboarding` / `wizard.modelPicker`: provider owns auth-choice labels,
hints, and setup entries in onboarding/model pickers
- `catalog`: provider appears in `models.providers`
- `resolveDynamicModel`: provider accepts model ids not present in the local
static catalog yet

View File

@ -1230,7 +1230,8 @@ The non-interactive context includes:
- the current and base config
- parsed onboarding CLI options
- runtime logging/error helpers
- agent/workspace dirs
- agent/workspace dirs so the provider can persist auth into the same scoped
store used by the rest of onboarding
- `resolveApiKey(...)` to read provider keys from flags, env, or existing auth
profiles while honoring `--secret-input-mode`
- `toApiKeyCredential(...)` to convert a resolved key into an auth-profile
@ -1407,10 +1408,13 @@ api.registerProvider({
Notes:
- `run` receives a `ProviderAuthContext` with `prompter`, `runtime`,
`openUrl`, and `oauth.createVpsAwareHandlers` helpers.
`openUrl`, `oauth.createVpsAwareHandlers`, `secretInputMode`, and
`allowSecretRefPrompt` helpers/state. Onboarding/configure flows can use
these to honor `--secret-input-mode` or offer env/file/exec secret-ref
capture, while `openclaw models auth` keeps a tighter prompt surface.
- `runNonInteractive` receives a `ProviderAuthMethodNonInteractiveContext`
with `opts`, `resolveApiKey`, and `toApiKeyCredential` helpers for
headless onboarding.
with `opts`, `agentDir`, `resolveApiKey`, and `toApiKeyCredential` helpers
for headless onboarding.
- Return `configPatch` when you need to add default models or provider config.
- Return `defaultModel` so `--set-default` can update agent defaults.
- `wizard.setup` adds a provider choice to `openclaw onboard`.

View File

@ -5,10 +5,19 @@ import {
type ProviderResolveDynamicModelContext,
type ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { upsertAuthProfile } from "../../src/agents/auth-profiles.js";
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { parseDurationMs } from "../../src/cli/parse-duration.js";
import {
normalizeSecretInputModeInput,
promptSecretRefForOnboarding,
resolveSecretInputModeForEnvSelection,
} from "../../src/commands/auth-choice.apply-helpers.js";
import { buildTokenProfileId, validateAnthropicSetupToken } from "../../src/commands/auth-token.js";
import { applyAuthProfileConfig } from "../../src/commands/onboard-auth.js";
import { fetchClaudeUsage } from "../../src/infra/provider-usage.fetch.js";
import type { ProviderAuthResult } from "../../src/plugins/types.js";
import { normalizeSecretInput } from "../../src/utils/normalize-secret-input.js";
const PROVIDER_ID = "anthropic";
const ANTHROPIC_OPUS_46_MODEL_ID = "claude-opus-4-6";
@ -119,11 +128,41 @@ async function runAnthropicSetupToken(ctx: ProviderAuthContext): Promise<Provide
"Anthropic setup-token",
);
const tokenRaw = await ctx.prompter.text({
message: "Paste Anthropic setup-token",
validate: (value) => validateAnthropicSetupToken(String(value ?? "")),
});
const token = String(tokenRaw ?? "").trim();
const requestedSecretInputMode = normalizeSecretInputModeInput(ctx.secretInputMode);
const selectedMode = ctx.allowSecretRefPrompt
? await resolveSecretInputModeForEnvSelection({
prompter: ctx.prompter,
explicitMode: requestedSecretInputMode,
copy: {
modeMessage: "How do you want to provide this setup token?",
plaintextLabel: "Paste setup token now",
plaintextHint: "Stores the token directly in the auth profile",
},
})
: "plaintext";
let token = "";
let tokenRef: { source: "env" | "file" | "exec"; provider: string; id: string } | undefined;
if (selectedMode === "ref") {
const resolved = await promptSecretRefForOnboarding({
provider: "anthropic-setup-token",
config: ctx.config,
prompter: ctx.prompter,
preferredEnvVar: "ANTHROPIC_SETUP_TOKEN",
copy: {
sourceMessage: "Where is this Anthropic setup token stored?",
envVarPlaceholder: "ANTHROPIC_SETUP_TOKEN",
},
});
token = resolved.resolvedValue.trim();
tokenRef = resolved.ref;
} else {
const tokenRaw = await ctx.prompter.text({
message: "Paste Anthropic setup-token",
validate: (value) => validateAnthropicSetupToken(String(value ?? "")),
});
token = String(tokenRaw ?? "").trim();
}
const tokenError = validateAnthropicSetupToken(token);
if (tokenError) {
throw new Error(tokenError);
@ -145,12 +184,80 @@ async function runAnthropicSetupToken(ctx: ProviderAuthContext): Promise<Provide
type: "token",
provider: PROVIDER_ID,
token,
...(tokenRef ? { tokenRef } : {}),
},
},
],
};
}
async function runAnthropicSetupTokenNonInteractive(ctx: {
config: ProviderAuthContext["config"];
opts: {
tokenProvider?: string;
token?: string;
tokenExpiresIn?: string;
tokenProfileId?: string;
};
runtime: ProviderAuthContext["runtime"];
agentDir?: string;
}): Promise<ProviderAuthContext["config"] | null> {
const provider = ctx.opts.tokenProvider?.trim().toLowerCase();
if (!provider) {
ctx.runtime.error("Missing --token-provider for --auth-choice token.");
ctx.runtime.exit(1);
return null;
}
if (provider !== PROVIDER_ID) {
ctx.runtime.error("Only --token-provider anthropic is supported for --auth-choice token.");
ctx.runtime.exit(1);
return null;
}
const token = normalizeSecretInput(ctx.opts.token);
if (!token) {
ctx.runtime.error("Missing --token for --auth-choice token.");
ctx.runtime.exit(1);
return null;
}
const tokenError = validateAnthropicSetupToken(token);
if (tokenError) {
ctx.runtime.error(tokenError);
ctx.runtime.exit(1);
return null;
}
let expires: number | undefined;
const expiresInRaw = ctx.opts.tokenExpiresIn?.trim();
if (expiresInRaw) {
try {
expires = Date.now() + parseDurationMs(expiresInRaw, { defaultUnit: "d" });
} catch (err) {
ctx.runtime.error(`Invalid --token-expires-in: ${String(err)}`);
ctx.runtime.exit(1);
return null;
}
}
const profileId =
ctx.opts.tokenProfileId?.trim() || buildTokenProfileId({ provider: PROVIDER_ID, name: "" });
upsertAuthProfile({
profileId,
agentDir: ctx.agentDir,
credential: {
type: "token",
provider: PROVIDER_ID,
token,
...(expires ? { expires } : {}),
},
});
return applyAuthProfileConfig(ctx.config, {
profileId,
provider: PROVIDER_ID,
mode: "token",
});
}
const anthropicPlugin = {
id: PROVIDER_ID,
name: "Anthropic Provider",
@ -169,8 +276,23 @@ const anthropicPlugin = {
hint: "Paste a setup-token from `claude setup-token`",
kind: "token",
run: async (ctx: ProviderAuthContext) => await runAnthropicSetupToken(ctx),
runNonInteractive: async (ctx) =>
await runAnthropicSetupTokenNonInteractive({
config: ctx.config,
opts: ctx.opts,
runtime: ctx.runtime,
agentDir: ctx.agentDir,
}),
},
],
wizard: {
onboarding: {
choiceId: "token",
choiceLabel: "Anthropic token (paste setup-token)",
choiceHint: "Run `claude setup-token` elsewhere, then paste the token here",
methodId: "setup-token",
},
},
resolveDynamicModel: (ctx) => resolveAnthropicForwardCompatModel(ctx),
capabilities: {
providerFamily: "anthropic",

View File

@ -133,15 +133,20 @@ function resolveCodexForwardCompatModel(
}
async function runOpenAICodexOAuth(ctx: ProviderAuthContext) {
const creds = await loginOpenAICodexOAuth({
prompter: ctx.prompter,
runtime: ctx.runtime,
isRemote: ctx.isRemote,
openUrl: ctx.openUrl,
localBrowserMessage: "Complete sign-in in browser…",
});
let creds;
try {
creds = await loginOpenAICodexOAuth({
prompter: ctx.prompter,
runtime: ctx.runtime,
isRemote: ctx.isRemote,
openUrl: ctx.openUrl,
localBrowserMessage: "Complete sign-in in browser…",
});
} catch {
return { profiles: [] };
}
if (!creds) {
throw new Error("OpenAI Codex OAuth did not return credentials.");
return { profiles: [] };
}
return buildOauthProviderAuthResult({
@ -168,6 +173,14 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
run: async (ctx) => await runOpenAICodexOAuth(ctx),
},
],
wizard: {
onboarding: {
choiceId: "openai-codex",
choiceLabel: "OpenAI Codex (ChatGPT OAuth)",
choiceHint: "Browser sign-in",
methodId: "oauth",
},
},
catalog: {
order: "profile",
run: async (ctx) => {

View File

@ -1,3 +1,4 @@
import { normalizeProviderId } from "../../src/agents/model-selection.js";
import {
createPluginBackedWebSearchProvider,
getScopedCredentialValue,
@ -6,12 +7,28 @@ import {
import { emptyPluginConfigSchema } from "../../src/plugins/config-schema.js";
import type { OpenClawPluginApi } from "../../src/plugins/types.js";
const XAI_MODERN_MODEL_PREFIXES = ["grok-4"] as const;
function matchesModernXaiModel(modelId: string): boolean {
const normalized = modelId.trim().toLowerCase();
return XAI_MODERN_MODEL_PREFIXES.some((prefix) => normalized.startsWith(prefix));
}
const xaiPlugin = {
id: "xai",
name: "xAI Plugin",
description: "Bundled xAI plugin",
configSchema: emptyPluginConfigSchema(),
register(api: OpenClawPluginApi) {
api.registerProvider({
id: "xai",
label: "xAI",
docsPath: "/providers/models",
envVars: ["XAI_API_KEY"],
auth: [],
isModernModelRef: ({ provider, modelId }) =>
normalizeProviderId(provider) === "xai" ? matchesModernXaiModel(modelId) : undefined,
});
api.registerWebSearchProvider(
createPluginBackedWebSearchProvider({
id: "grok",

View File

@ -1,5 +1,9 @@
{
"id": "xai",
"providers": ["xai"],
"providerAuthEnvVars": {
"xai": ["XAI_API_KEY"]
},
"configSchema": {
"type": "object",
"additionalProperties": false,

View File

@ -5,37 +5,6 @@ export type ModelRef = {
id?: string | null;
};
const ANTHROPIC_PREFIXES = [
"claude-opus-4-6",
"claude-sonnet-4-6",
"claude-opus-4-5",
"claude-sonnet-4-5",
"claude-haiku-4-5",
];
const OPENAI_MODELS = ["gpt-5.4", "gpt-5.2", "gpt-5.0"];
const CODEX_MODELS = [
"gpt-5.4",
"gpt-5.2",
"gpt-5.2-codex",
"gpt-5.3-codex",
"gpt-5.3-codex-spark",
"gpt-5.1-codex",
"gpt-5.1-codex-mini",
"gpt-5.1-codex-max",
];
const GOOGLE_PREFIXES = ["gemini-3"];
const ZAI_PREFIXES = ["glm-5", "glm-4.7", "glm-4.7-flash", "glm-4.7-flashx"];
const MINIMAX_PREFIXES = ["minimax-m2.5", "minimax-m2.5"];
const XAI_PREFIXES = ["grok-4"];
function matchesPrefix(id: string, prefixes: string[]): boolean {
return prefixes.some((prefix) => id.startsWith(prefix));
}
function matchesExactOrPrefix(id: string, values: string[]): boolean {
return values.some((value) => id === value || id.startsWith(value));
}
export function isModernModelRef(ref: ModelRef): boolean {
const provider = ref.provider?.trim().toLowerCase() ?? "";
const id = ref.id?.trim().toLowerCase() ?? "";
@ -53,54 +22,5 @@ export function isModernModelRef(ref: ModelRef): boolean {
if (typeof pluginDecision === "boolean") {
return pluginDecision;
}
// Compatibility fallback for core-owned providers and tests that disable
// bundled provider runtime hooks.
if (provider === "anthropic") {
return matchesPrefix(id, ANTHROPIC_PREFIXES);
}
if (provider === "openai") {
return matchesExactOrPrefix(id, OPENAI_MODELS);
}
if (provider === "openai-codex") {
return matchesExactOrPrefix(id, CODEX_MODELS);
}
if (provider === "google" || provider === "google-gemini-cli") {
return matchesPrefix(id, GOOGLE_PREFIXES);
}
if (provider === "zai") {
return matchesPrefix(id, ZAI_PREFIXES);
}
if (provider === "minimax") {
return matchesPrefix(id, MINIMAX_PREFIXES);
}
if (provider === "xai") {
return matchesPrefix(id, XAI_PREFIXES);
}
if (provider === "opencode" && id.endsWith("-free")) {
return false;
}
if (provider === "opencode" && id === "alpha-glm-4.7") {
return false;
}
// Opencode MiniMax variants have been intermittently unstable in live runs;
// prefer the rest of the modern catalog for deterministic smoke coverage.
if (provider === "opencode" && matchesPrefix(id, MINIMAX_PREFIXES)) {
return false;
}
if (provider === "openrouter" || provider === "opencode" || provider === "opencode-go") {
// OpenRouter/opencode are pass-through proxies; accept any model ID
// rather than restricting to a static prefix list.
return true;
}
return false;
}

View File

@ -345,25 +345,34 @@ describe("isModernModelRef", () => {
expect(isModernModelRef({ provider: "openrouter", id: "claude-opus-4-6" })).toBe(false);
});
it("includes OpenAI gpt-5.4 variants in modern selection", () => {
it("includes plugin-advertised modern models", () => {
providerRuntimeMocks.resolveProviderModernModelRef.mockImplementation(({ provider, context }) =>
provider === "openai" && ["gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)
? true
: provider === "openai-codex" && context.modelId === "gpt-5.4"
? true
: provider === "opencode" && ["claude-opus-4-6", "gemini-3-pro"].includes(context.modelId)
? true
: provider === "opencode-go"
? true
: undefined,
);
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4" })).toBe(true);
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-pro" })).toBe(true);
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4" })).toBe(true);
});
it("excludes opencode minimax variants from modern selection", () => {
expect(isModernModelRef({ provider: "opencode", id: "minimax-m2.5" })).toBe(false);
expect(isModernModelRef({ provider: "opencode", id: "minimax-m2.5" })).toBe(false);
});
it("keeps non-minimax opencode modern models", () => {
expect(isModernModelRef({ provider: "opencode", id: "claude-opus-4-6" })).toBe(true);
expect(isModernModelRef({ provider: "opencode", id: "gemini-3-pro" })).toBe(true);
});
it("accepts all opencode-go models without zen exclusions", () => {
expect(isModernModelRef({ provider: "opencode-go", id: "kimi-k2.5" })).toBe(true);
expect(isModernModelRef({ provider: "opencode-go", id: "glm-5" })).toBe(true);
expect(isModernModelRef({ provider: "opencode-go", id: "minimax-m2.5" })).toBe(true);
});
it("excludes provider-declined modern models", () => {
providerRuntimeMocks.resolveProviderModernModelRef.mockImplementation(({ provider, context }) =>
provider === "opencode" && context.modelId === "minimax-m2.5" ? false : undefined,
);
expect(isModernModelRef({ provider: "opencode", id: "minimax-m2.5" })).toBe(false);
});
});

View File

@ -70,23 +70,25 @@ describe("listThinkingLevels", () => {
expect(listThinkingLevels("demo", "demo-model")).toContain("xhigh");
});
it("includes xhigh for codex models", () => {
expect(listThinkingLevels(undefined, "gpt-5.2-codex")).toContain("xhigh");
expect(listThinkingLevels(undefined, "gpt-5.3-codex")).toContain("xhigh");
expect(listThinkingLevels(undefined, "gpt-5.3-codex-spark")).toContain("xhigh");
});
it("includes xhigh for provider-advertised models", () => {
providerRuntimeMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
(provider === "openai" && ["gpt-5.2", "gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)) ||
(provider === "openai-codex" &&
["gpt-5.2-codex", "gpt-5.3-codex", "gpt-5.3-codex-spark", "gpt-5.4"].includes(
context.modelId,
)) ||
(provider === "github-copilot" && ["gpt-5.2", "gpt-5.2-codex"].includes(context.modelId))
? true
: undefined,
);
it("includes xhigh for openai gpt-5.2 and gpt-5.4 variants", () => {
expect(listThinkingLevels("openai-codex", "gpt-5.2-codex")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.3-codex")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.3-codex-spark")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.2")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4-pro")).toContain("xhigh");
});
it("includes xhigh for openai-codex gpt-5.4", () => {
expect(listThinkingLevels("openai-codex", "gpt-5.4")).toContain("xhigh");
});
it("includes xhigh for github-copilot gpt-5.2 refs", () => {
expect(listThinkingLevels("github-copilot", "gpt-5.2")).toContain("xhigh");
expect(listThinkingLevels("github-copilot", "gpt-5.2-codex")).toContain("xhigh");
});
@ -108,7 +110,11 @@ describe("listThinkingLevelLabels", () => {
expect(listThinkingLevelLabels("demo", "demo-model")).toEqual(["off", "on"]);
});
it("returns on/off for ZAI", () => {
it("returns on/off for provider-advertised binary thinking", () => {
providerRuntimeMocks.resolveProviderBinaryThinking.mockImplementation(({ provider }) =>
provider === "zai" ? true : undefined,
);
expect(listThinkingLevelLabels("zai", "glm-4.7")).toEqual(["off", "on"]);
});
@ -127,7 +133,12 @@ describe("resolveThinkingDefaultForModel", () => {
);
});
it("defaults Claude 4.6 models to adaptive", () => {
it("uses provider-advertised adaptive defaults", () => {
providerRuntimeMocks.resolveProviderDefaultThinkingLevel.mockImplementation(
({ provider, context }) =>
provider === "anthropic" && context.modelId === "claude-opus-4-6" ? "adaptive" : undefined,
);
expect(
resolveThinkingDefaultForModel({ provider: "anthropic", model: "claude-opus-4-6" }),
).toBe("adaptive");

View File

@ -49,30 +49,9 @@ export function isBinaryThinkingProvider(provider?: string | null, model?: strin
if (typeof pluginDecision === "boolean") {
return pluginDecision;
}
return normalizedProvider === "zai";
return false;
}
export const XHIGH_MODEL_REFS = [
"openai/gpt-5.4",
"openai/gpt-5.4-pro",
"openai/gpt-5.2",
"openai-codex/gpt-5.4",
"openai-codex/gpt-5.3-codex",
"openai-codex/gpt-5.3-codex-spark",
"openai-codex/gpt-5.2-codex",
"openai-codex/gpt-5.1-codex",
"github-copilot/gpt-5.2-codex",
"github-copilot/gpt-5.2",
] as const;
const XHIGH_MODEL_SET = new Set(XHIGH_MODEL_REFS.map((entry) => entry.toLowerCase()));
const XHIGH_MODEL_IDS = new Set(
XHIGH_MODEL_REFS.map((entry) => entry.split("/")[1]?.toLowerCase()).filter(
(entry): entry is string => Boolean(entry),
),
);
// Normalize user-provided thinking level strings to the canonical enum.
export function normalizeThinkLevel(raw?: string | null): ThinkLevel | undefined {
if (!raw) {
@ -130,10 +109,7 @@ export function supportsXHighThinking(provider?: string | null, model?: string |
return pluginDecision;
}
}
if (providerKey) {
return XHIGH_MODEL_SET.has(`${providerKey}/${modelKey}`);
}
return XHIGH_MODEL_IDS.has(modelKey);
return false;
}
export function listThinkingLevels(provider?: string | null, model?: string | null): ThinkLevel[] {
@ -161,17 +137,7 @@ export function formatThinkingLevels(
}
export function formatXHighModelHint(): string {
const refs = [...XHIGH_MODEL_REFS] as string[];
if (refs.length === 0) {
return "unknown model";
}
if (refs.length === 1) {
return refs[0];
}
if (refs.length === 2) {
return `${refs[0]} or ${refs[1]}`;
}
return `${refs.slice(0, -1).join(", ")} or ${refs[refs.length - 1]}`;
return "provider models that advertise xhigh reasoning";
}
export function resolveThinkingDefaultForModel(params: {
@ -196,12 +162,7 @@ export function resolveThinkingDefaultForModel(params: {
return pluginDecision;
}
const isAnthropicFamilyModel =
normalizedProvider === "anthropic" ||
normalizedProvider === "amazon-bedrock" ||
modelLower.includes("anthropic/") ||
modelLower.includes(".anthropic.");
if (isAnthropicFamilyModel && CLAUDE_46_MODEL_RE.test(modelLower)) {
if (normalizedProvider === "amazon-bedrock" && CLAUDE_46_MODEL_RE.test(modelLower)) {
return "adaptive";
}
if (candidate?.reasoning) {

View File

@ -41,25 +41,24 @@ export function buildAuthChoiceOptions(params: {
env?: NodeJS.ProcessEnv;
}): AuthChoiceOption[] {
void params.store;
const options: AuthChoiceOption[] = [...BASE_AUTH_CHOICE_OPTIONS];
const seen = new Set(options.map((option) => option.value));
const optionByValue = new Map<AuthChoice, AuthChoiceOption>(
BASE_AUTH_CHOICE_OPTIONS.map((option) => [option.value, option]),
);
for (const option of resolveProviderWizardOptions({
config: params.config,
workspaceDir: params.workspaceDir,
env: params.env,
})) {
if (seen.has(option.value as AuthChoice)) {
continue;
}
options.push({
optionByValue.set(option.value as AuthChoice, {
value: option.value as AuthChoice,
label: option.label,
hint: option.hint,
});
seen.add(option.value as AuthChoice);
}
const options: AuthChoiceOption[] = Array.from(optionByValue.values());
if (params.includeSkip) {
options.push({ value: "skip", label: "Skip for now" });
}

View File

@ -1,5 +1,9 @@
import { afterEach, describe, expect, it } from "vitest";
import { afterEach, describe, expect, it, vi } from "vitest";
import anthropicPlugin from "../../extensions/anthropic/index.js";
import type { ProviderPlugin } from "../plugins/types.js";
import { registerSingleProviderPlugin } from "../test-utils/plugin-registration.js";
import { applyAuthChoiceAnthropic } from "./auth-choice.apply.anthropic.js";
import { applyAuthChoice } from "./auth-choice.js";
import { ANTHROPIC_SETUP_TOKEN_PREFIX } from "./auth-token.js";
import {
createAuthTestLifecycle,
@ -9,11 +13,17 @@ import {
setupAuthTestEnv,
} from "./test-wizard-helpers.js";
const resolvePluginProviders = vi.hoisted(() => vi.fn<() => ProviderPlugin[]>(() => []));
vi.mock("../plugins/providers.js", () => ({
resolvePluginProviders,
}));
describe("applyAuthChoiceAnthropic", () => {
const lifecycle = createAuthTestLifecycle([
"OPENCLAW_STATE_DIR",
"OPENCLAW_AGENT_DIR",
"PI_CODING_AGENT_DIR",
"ANTHROPIC_API_KEY",
"ANTHROPIC_SETUP_TOKEN",
]);
@ -24,18 +34,21 @@ describe("applyAuthChoiceAnthropic", () => {
}
afterEach(async () => {
resolvePluginProviders.mockReset();
resolvePluginProviders.mockReturnValue([]);
await lifecycle.cleanup();
});
it("persists setup-token ref without plaintext token in auth-profiles store", async () => {
it("writes env-backed Anthropic key as keyRef when secret-input-mode=ref", async () => {
const agentDir = await setupTempState();
process.env.ANTHROPIC_SETUP_TOKEN = `${ANTHROPIC_SETUP_TOKEN_PREFIX}${"x".repeat(100)}`;
process.env.ANTHROPIC_API_KEY = "sk-ant-api-key";
const prompter = createWizardPrompter({}, { defaultSelect: "ref" });
const confirm = vi.fn(async () => true);
const prompter = createWizardPrompter({ confirm }, { defaultSelect: "ref" });
const runtime = createExitThrowingRuntime();
const result = await applyAuthChoiceAnthropic({
authChoice: "setup-token",
authChoice: "apiKey",
config: {},
prompter,
runtime,
@ -45,9 +58,40 @@ describe("applyAuthChoiceAnthropic", () => {
expect(result).not.toBeNull();
expect(result?.config.auth?.profiles?.["anthropic:default"]).toMatchObject({
provider: "anthropic",
mode: "token",
mode: "api_key",
});
const parsed = await readAuthProfilesForAgent<{
profiles?: Record<string, { key?: string; keyRef?: unknown }>;
}>(agentDir);
expect(parsed.profiles?.["anthropic:default"]).toMatchObject({
keyRef: { source: "env", provider: "default", id: "ANTHROPIC_API_KEY" },
});
expect(parsed.profiles?.["anthropic:default"]?.key).toBeUndefined();
});
it("routes token onboarding through the anthropic provider plugin", async () => {
const agentDir = await setupTempState();
process.env.ANTHROPIC_SETUP_TOKEN = `${ANTHROPIC_SETUP_TOKEN_PREFIX}${"x".repeat(100)}`;
resolvePluginProviders.mockReturnValue([registerSingleProviderPlugin(anthropicPlugin)]);
const select = vi.fn().mockResolvedValueOnce("env");
const text = vi.fn().mockResolvedValueOnce("ANTHROPIC_SETUP_TOKEN").mockResolvedValueOnce("");
const prompter = createWizardPrompter({ select, text }, { defaultSelect: "ref" });
const runtime = createExitThrowingRuntime();
const result = await applyAuthChoice({
authChoice: "token",
config: {},
prompter,
runtime,
setDefaultModel: true,
opts: { secretInputMode: "ref" },
});
expect(result.config.auth?.profiles?.["anthropic:default"]).toMatchObject({
provider: "anthropic",
mode: "token",
});
const parsed = await readAuthProfilesForAgent<{
profiles?: Record<string, { token?: string; tokenRef?: unknown }>;
}>(agentDir);

View File

@ -1,13 +1,10 @@
import { upsertAuthProfile } from "../agents/auth-profiles.js";
import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js";
import {
normalizeSecretInputModeInput,
ensureApiKeyFromOptionEnvOrPrompt,
promptSecretRefForSetup,
resolveSecretInputModeForEnvSelection,
} from "./auth-choice.apply-helpers.js";
import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js";
import { buildTokenProfileId, validateAnthropicSetupToken } from "./auth-token.js";
import { applyAuthChoicePluginProvider } from "./auth-choice.apply.plugin-provider.js";
import { applyAgentDefaultModelPrimary } from "./onboard-auth.config-shared.js";
import { applyAuthProfileConfig, setAnthropicApiKey } from "./onboard-auth.js";
@ -22,80 +19,13 @@ export async function applyAuthChoiceAnthropic(
params.authChoice === "oauth" ||
params.authChoice === "token"
) {
let nextConfig = params.config;
await params.prompter.note(
["Run `claude setup-token` in your terminal.", "Then paste the generated token below."].join(
"\n",
),
"Anthropic setup-token",
);
const selectedMode = await resolveSecretInputModeForEnvSelection({
prompter: params.prompter,
explicitMode: requestedSecretInputMode,
copy: {
modeMessage: "How do you want to provide this setup token?",
plaintextLabel: "Paste setup token now",
plaintextHint: "Stores the token directly in the auth profile",
},
return await applyAuthChoicePluginProvider(params, {
authChoice: params.authChoice,
pluginId: "anthropic",
providerId: "anthropic",
methodId: "setup-token",
label: "Anthropic",
});
let token = "";
let tokenRef: { source: "env" | "file" | "exec"; provider: string; id: string } | undefined;
if (selectedMode === "ref") {
const resolved = await promptSecretRefForSetup({
provider: "anthropic-setup-token",
config: params.config,
prompter: params.prompter,
preferredEnvVar: "ANTHROPIC_SETUP_TOKEN",
copy: {
sourceMessage: "Where is this Anthropic setup token stored?",
envVarPlaceholder: "ANTHROPIC_SETUP_TOKEN",
},
});
token = resolved.resolvedValue.trim();
tokenRef = resolved.ref;
} else {
const tokenRaw = await params.prompter.text({
message: "Paste Anthropic setup-token",
validate: (value) => validateAnthropicSetupToken(String(value ?? "")),
});
token = String(tokenRaw ?? "").trim();
}
const tokenValidationError = validateAnthropicSetupToken(token);
if (tokenValidationError) {
throw new Error(tokenValidationError);
}
const profileNameRaw = await params.prompter.text({
message: "Token name (blank = default)",
placeholder: "default",
});
const provider = "anthropic";
const namedProfileId = buildTokenProfileId({
provider,
name: String(profileNameRaw ?? ""),
});
upsertAuthProfile({
profileId: namedProfileId,
agentDir: params.agentDir,
credential: {
type: "token",
provider,
token,
...(tokenRef ? { tokenRef } : {}),
},
});
nextConfig = applyAuthProfileConfig(nextConfig, {
profileId: namedProfileId,
provider,
mode: "token",
});
if (params.setDefaultModel) {
nextConfig = applyAgentDefaultModelPrimary(nextConfig, DEFAULT_ANTHROPIC_MODEL);
}
return { config: nextConfig };
}
if (params.authChoice === "apiKey") {

View File

@ -6,14 +6,8 @@ import {
} from "./auth-choice.apply-helpers.js";
import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js";
import { applyDefaultModelChoice } from "./auth-choice.default-model.js";
import { isRemoteEnvironment } from "./oauth-env.js";
import { applyAuthProfileConfig, setOpenaiApiKey, writeOAuthCredentials } from "./onboard-auth.js";
import { openUrl } from "./onboard-helpers.js";
import {
applyOpenAICodexModelDefault,
OPENAI_CODEX_DEFAULT_MODEL,
} from "./openai-codex-model-default.js";
import { loginOpenAICodexOAuth } from "./openai-codex-oauth.js";
import { applyAuthChoicePluginProvider } from "./auth-choice.apply.plugin-provider.js";
import { applyAuthProfileConfig, setOpenaiApiKey } from "./onboard-auth.js";
import {
applyOpenAIConfig,
applyOpenAIProviderConfig,
@ -72,51 +66,14 @@ export async function applyAuthChoiceOpenAI(
});
return await applyOpenAiDefaultModelChoice();
}
if (params.authChoice === "openai-codex") {
let nextConfig = params.config;
let agentModelOverride: string | undefined;
let creds;
try {
creds = await loginOpenAICodexOAuth({
prompter: params.prompter,
runtime: params.runtime,
isRemote: isRemoteEnvironment(),
openUrl: async (url) => {
await openUrl(url);
},
localBrowserMessage: "Complete sign-in in browser…",
});
} catch {
// The helper already surfaces the error to the user.
// Keep setup flow alive and return unchanged config.
return { config: nextConfig, agentModelOverride };
}
if (creds) {
const profileId = await writeOAuthCredentials("openai-codex", creds, params.agentDir, {
syncSiblingAgents: true,
});
nextConfig = applyAuthProfileConfig(nextConfig, {
profileId,
provider: "openai-codex",
mode: "oauth",
});
if (params.setDefaultModel) {
const applied = applyOpenAICodexModelDefault(nextConfig);
nextConfig = applied.next;
if (applied.changed) {
await params.prompter.note(
`Default model set to ${OPENAI_CODEX_DEFAULT_MODEL}`,
"Model configured",
);
}
} else {
agentModelOverride = OPENAI_CODEX_DEFAULT_MODEL;
await noteAgentModel(OPENAI_CODEX_DEFAULT_MODEL);
}
}
return { config: nextConfig, agentModelOverride };
return await applyAuthChoicePluginProvider(params, {
authChoice: "openai-codex",
pluginId: "openai",
providerId: "openai-codex",
methodId: "oauth",
label: "OpenAI",
});
}
return null;

View File

@ -13,6 +13,7 @@ import { isRemoteEnvironment } from "./oauth-env.js";
import { createVpsAwareOAuthHandlers } from "./oauth-flow.js";
import { applyAuthProfileConfig } from "./onboard-auth.js";
import { openUrl } from "./onboard-helpers.js";
import type { OnboardOptions } from "./onboard-types.js";
import {
applyDefaultModel,
mergeConfigPatch,
@ -41,6 +42,8 @@ export async function runProviderPluginAuthMethod(params: {
agentId?: string;
workspaceDir?: string;
emitNotes?: boolean;
secretInputMode?: OnboardOptions["secretInputMode"];
allowSecretRefPrompt?: boolean;
}): Promise<{ config: ApplyAuthChoiceParams["config"]; defaultModel?: string }> {
const agentId = params.agentId ?? resolveDefaultAgentId(params.config);
const defaultAgentId = resolveDefaultAgentId(params.config);
@ -61,6 +64,8 @@ export async function runProviderPluginAuthMethod(params: {
workspaceDir,
prompter: params.prompter,
runtime: params.runtime,
secretInputMode: params.secretInputMode,
allowSecretRefPrompt: params.allowSecretRefPrompt,
isRemote,
openUrl: async (url) => {
await openUrl(url);
@ -127,6 +132,8 @@ export async function applyAuthChoiceLoadedPluginProvider(
agentDir: params.agentDir,
agentId: params.agentId,
workspaceDir,
secretInputMode: params.opts?.secretInputMode,
allowSecretRefPrompt: true,
});
let agentModelOverride: string | undefined;
@ -204,6 +211,8 @@ export async function applyAuthChoicePluginProvider(
agentDir,
agentId,
workspaceDir,
secretInputMode: params.opts?.secretInputMode,
allowSecretRefPrompt: true,
});
nextConfig = applied.config;

View File

@ -4,7 +4,7 @@ import { loadModelCatalog } from "../agents/model-catalog.js";
import { resolveDefaultModelForAgent } from "../agents/model-selection.js";
import type { OpenClawConfig } from "../config/config.js";
import type { WizardPrompter } from "../wizard/prompts.js";
import { OPENAI_CODEX_DEFAULT_MODEL } from "./openai-codex-model-default.js";
import { buildProviderAuthRecoveryHint } from "./provider-auth-guidance.js";
export async function warnIfModelConfigLooksOff(
config: OpenClawConfig,
@ -37,19 +37,16 @@ export async function warnIfModelConfigLooksOff(
const hasCustomKey = hasUsableCustomProviderApiKey(config, ref.provider);
if (!hasProfile && !envKey && !hasCustomKey) {
warnings.push(
`No auth configured for provider "${ref.provider}". The agent may fail until credentials are added.`,
`No auth configured for provider "${ref.provider}". The agent may fail until credentials are added. ${buildProviderAuthRecoveryHint(
{
provider: ref.provider,
config,
includeEnvVar: true,
},
)}`,
);
}
if (ref.provider === "openai") {
const hasCodex = listProfilesForProvider(store, "openai-codex").length > 0;
if (hasCodex) {
warnings.push(
`Detected OpenAI Codex OAuth. Consider setting agents.defaults.model to ${OPENAI_CODEX_DEFAULT_MODEL}.`,
);
}
}
if (warnings.length > 0) {
await prompter.note(warnings.join("\n"), "Model check");
}

View File

@ -139,6 +139,27 @@ describe("applyAuthChoice", () => {
await setupTempState();
loginOpenAICodexOAuth.mockRejectedValueOnce(new Error("oauth failed"));
resolvePluginProviders.mockReturnValue([
{
id: "openai-codex",
label: "OpenAI Codex",
auth: [
{
id: "oauth",
label: "ChatGPT OAuth",
kind: "oauth",
run: vi.fn(async () => {
try {
await loginOpenAICodexOAuth();
} catch {
return { profiles: [] };
}
return { profiles: [] };
}),
},
],
},
] as never);
const prompter = createPrompter({});
const runtime = createExitThrowingRuntime();
@ -163,6 +184,41 @@ describe("applyAuthChoice", () => {
access: "access-token",
expires: Date.now() + 60_000,
});
resolvePluginProviders.mockReturnValue([
{
id: "openai-codex",
label: "OpenAI Codex",
auth: [
{
id: "oauth",
label: "ChatGPT OAuth",
kind: "oauth",
run: vi.fn(async () => {
const creds = await loginOpenAICodexOAuth();
if (!creds) {
return { profiles: [] };
}
return {
profiles: [
{
profileId: "openai-codex:user@example.com",
credential: {
type: "oauth",
provider: "openai-codex",
refresh: "refresh-token",
access: "access-token",
expires: creds.expires,
email: "user@example.com",
},
},
],
defaultModel: "openai-codex/gpt-5.4",
};
}),
},
],
},
] as never);
const prompter = createPrompter({});
const runtime = createExitThrowingRuntime();

View File

@ -17,6 +17,10 @@ import { formatCliCommand } from "../cli/command-format.js";
import type { OpenClawConfig } from "../config/config.js";
import { note } from "../terminal/note.js";
import type { DoctorPrompter } from "./doctor-prompter.js";
import {
buildProviderAuthRecoveryHint,
resolveProviderAuthLoginCommand,
} from "./provider-auth-guidance.js";
export async function maybeRepairAnthropicOAuthProfileId(
cfg: OpenClawConfig,
@ -129,16 +133,16 @@ export async function maybeRemoveDeprecatedCliAuthProfiles(
const lines = ["Deprecated external CLI auth profiles detected (no longer supported):"];
if (deprecated.has(CLAUDE_CLI_PROFILE_ID)) {
lines.push(
`- ${CLAUDE_CLI_PROFILE_ID} (Anthropic): use setup-token → ${formatCliCommand("openclaw models auth setup-token")}`,
);
const authCommand =
resolveProviderAuthLoginCommand({ provider: "anthropic" }) ??
formatCliCommand("openclaw configure");
lines.push(`- ${CLAUDE_CLI_PROFILE_ID} (Anthropic): use ${authCommand}`);
}
if (deprecated.has(CODEX_CLI_PROFILE_ID)) {
lines.push(
`- ${CODEX_CLI_PROFILE_ID} (OpenAI Codex): use OAuth → ${formatCliCommand(
"openclaw models auth login --provider openai-codex",
)}`,
);
const authCommand =
resolveProviderAuthLoginCommand({ provider: "openai-codex" }) ??
formatCliCommand("openclaw configure");
lines.push(`- ${CODEX_CLI_PROFILE_ID} (OpenAI Codex): use ${authCommand}`);
}
note(lines.join("\n"), "Auth profiles");
@ -228,16 +232,18 @@ function formatAuthIssueHint(issue: AuthIssue): string | null {
return "Invalid token expires metadata. Set a future Unix ms timestamp or remove expires.";
}
if (issue.provider === "anthropic" && issue.profileId === CLAUDE_CLI_PROFILE_ID) {
return `Deprecated profile. Use ${formatCliCommand("openclaw models auth setup-token")} or ${formatCliCommand(
"openclaw configure",
)}.`;
return `Deprecated profile. ${buildProviderAuthRecoveryHint({
provider: "anthropic",
})}`;
}
if (issue.provider === "openai-codex" && issue.profileId === CODEX_CLI_PROFILE_ID) {
return `Deprecated profile. Use ${formatCliCommand(
"openclaw models auth login --provider openai-codex",
)} or ${formatCliCommand("openclaw configure")}.`;
return `Deprecated profile. ${buildProviderAuthRecoveryHint({
provider: "openai-codex",
})}`;
}
return `Re-auth via \`${formatCliCommand("openclaw configure")}\` or \`${formatCliCommand("openclaw onboard")}\`.`;
return buildProviderAuthRecoveryHint({
provider: issue.provider,
}).replace(/^Run /, "Re-auth via ");
}
function formatAuthIssueLine(issue: AuthIssue): string {

View File

@ -14,7 +14,6 @@ import { resolveAgentModelPrimaryValue } from "../config/model-input.js";
import type { ProviderPlugin } from "../plugins/types.js";
import type { WizardPrompter, WizardSelectOption } from "../wizard/prompts.js";
import { formatTokenK } from "./models/shared.js";
import { OPENAI_CODEX_DEFAULT_MODEL } from "./openai-codex-model-default.js";
const KEEP_VALUE = "__keep__";
const MANUAL_VALUE = "__manual__";
@ -154,14 +153,6 @@ function addModelSelectOption(params: {
params.seen.add(key);
}
function isAnthropicLegacyModel(entry: { provider: string; id: string }): boolean {
return (
entry.provider === "anthropic" &&
typeof entry.id === "string" &&
entry.id.toLowerCase().startsWith("claude-3")
);
}
async function promptManualModel(params: {
prompter: WizardPrompter;
allowBlank: boolean;
@ -270,9 +261,6 @@ export async function promptDefaultModel(
}
return entry.provider === preferredProvider;
});
if (preferredProvider === "anthropic") {
models = models.filter((entry) => !isAnthropicLegacyModel(entry));
}
}
const agentDir = params.agentDir;
@ -459,7 +447,7 @@ export async function promptModelAllowlist(params: {
params.message ??
"Allowlist models (comma-separated provider/model; blank to keep current)",
initialValue: existingKeys.join(", "),
placeholder: `${OPENAI_CODEX_DEFAULT_MODEL}, anthropic/claude-opus-4-6`,
placeholder: "provider/model, other-provider/model",
});
const parsed = String(raw ?? "")
.split(",")

View File

@ -275,6 +275,7 @@ async function runProviderAuthMethod(params: {
workspaceDir: params.workspaceDir,
prompter: params.prompter,
runtime: params.runtime,
allowSecretRefPrompt: false,
isRemote: isRemoteEnvironment(),
openUrl: async (url) => {
await openUrl(url);

View File

@ -23,7 +23,6 @@ import {
resolveDefaultModelForAgent,
resolveModelRefFromString,
} from "../../agents/model-selection.js";
import { formatCliCommand } from "../../cli/command-format.js";
import { withProgressTotals } from "../../cli/progress.js";
import { createConfigIO } from "../../config/config.js";
import {
@ -41,6 +40,7 @@ import type { RuntimeEnv } from "../../runtime.js";
import { getTerminalTableWidth, renderTable } from "../../terminal/table.js";
import { colorize, theme } from "../../terminal/theme.js";
import { shortenHomePath } from "../../utils.js";
import { buildProviderAuthRecoveryHint } from "../provider-auth-guidance.js";
import { resolveProviderAuthOverview } from "./list.auth-overview.js";
import { isRich } from "./list.format.js";
import {
@ -536,10 +536,11 @@ export async function modelsStatusCommand(
runtime.log("");
runtime.log(colorize(rich, theme.heading, "Missing auth"));
for (const provider of missingProvidersInUse) {
const hint =
provider === "anthropic"
? `Run \`claude setup-token\`, then \`${formatCliCommand("openclaw models auth setup-token")}\` or \`${formatCliCommand("openclaw configure")}\`.`
: `Run \`${formatCliCommand("openclaw configure")}\` or set an API key env var.`;
const hint = buildProviderAuthRecoveryHint({
provider,
config: cfg,
includeEnvVar: true,
});
runtime.log(`- ${theme.heading(provider)} ${hint}`);
}
}

View File

@ -1,4 +1,8 @@
import { resolveDefaultAgentId, resolveAgentWorkspaceDir } from "../../../agents/agent-scope.js";
import {
resolveAgentDir,
resolveDefaultAgentId,
resolveAgentWorkspaceDir,
} from "../../../agents/agent-scope.js";
import type { ApiKeyCredential } from "../../../agents/auth-profiles/types.js";
import { resolveDefaultAgentWorkspaceDir } from "../../../agents/workspace.js";
import type { OpenClawConfig } from "../../../config/config.js";
@ -58,6 +62,7 @@ export async function applyNonInteractivePluginProviderChoice(params: {
) => ApiKeyCredential | null;
}): Promise<OpenClawConfig | null | undefined> {
const agentId = resolveDefaultAgentId(params.nextConfig);
const agentDir = resolveAgentDir(params.nextConfig, agentId);
const workspaceDir =
resolveAgentWorkspaceDir(params.nextConfig, agentId) ?? resolveDefaultAgentWorkspaceDir();
const prefixedProviderId = params.authChoice.startsWith(PROVIDER_PLUGIN_CHOICE_PREFIX)
@ -116,6 +121,7 @@ export async function applyNonInteractivePluginProviderChoice(params: {
baseConfig: params.baseConfig,
opts: params.opts,
runtime: params.runtime,
agentDir,
workspaceDir,
resolveApiKey: params.resolveApiKey,
toApiKeyCredential: params.toApiKeyCredential,

View File

@ -1,14 +1,9 @@
import { upsertAuthProfile } from "../../../agents/auth-profiles.js";
import type { ApiKeyCredential } from "../../../agents/auth-profiles/types.js";
import { normalizeProviderId } from "../../../agents/model-selection.js";
import { parseDurationMs } from "../../../cli/parse-duration.js";
import type { OpenClawConfig } from "../../../config/config.js";
import type { SecretInput } from "../../../config/types.secrets.js";
import type { RuntimeEnv } from "../../../runtime.js";
import { resolveDefaultSecretProviderAlias } from "../../../secrets/ref-contract.js";
import { normalizeSecretInput } from "../../../utils/normalize-secret-input.js";
import { normalizeSecretInputModeInput } from "../../auth-choice.apply-helpers.js";
import { buildTokenProfileId, validateAnthropicSetupToken } from "../../auth-token.js";
import {
applyAuthProfileConfig,
applyCloudflareAiGatewayConfig,
@ -161,61 +156,6 @@ export async function applyNonInteractiveAuthChoice(params: {
return null;
}
if (authChoice === "token") {
const providerRaw = opts.tokenProvider?.trim();
if (!providerRaw) {
runtime.error("Missing --token-provider for --auth-choice token.");
runtime.exit(1);
return null;
}
const provider = normalizeProviderId(providerRaw);
if (provider !== "anthropic") {
runtime.error("Only --token-provider anthropic is supported for --auth-choice token.");
runtime.exit(1);
return null;
}
const tokenRaw = normalizeSecretInput(opts.token);
if (!tokenRaw) {
runtime.error("Missing --token for --auth-choice token.");
runtime.exit(1);
return null;
}
const tokenError = validateAnthropicSetupToken(tokenRaw);
if (tokenError) {
runtime.error(tokenError);
runtime.exit(1);
return null;
}
let expires: number | undefined;
const expiresInRaw = opts.tokenExpiresIn?.trim();
if (expiresInRaw) {
try {
expires = Date.now() + parseDurationMs(expiresInRaw, { defaultUnit: "d" });
} catch (err) {
runtime.error(`Invalid --token-expires-in: ${String(err)}`);
runtime.exit(1);
return null;
}
}
const profileId = opts.tokenProfileId?.trim() || buildTokenProfileId({ provider, name: "" });
upsertAuthProfile({
profileId,
credential: {
type: "token",
provider,
token: tokenRaw.trim(),
...(expires ? { expires } : {}),
},
});
return applyAuthProfileConfig(nextConfig, {
profileId,
provider,
mode: "token",
});
}
const simpleApiKeyChoice = await applySimpleNonInteractiveApiKeyChoice({
authChoice,
nextConfig,
@ -487,7 +427,6 @@ export async function applyNonInteractiveAuthChoice(params: {
if (
authChoice === "oauth" ||
authChoice === "chutes" ||
authChoice === "openai-codex" ||
authChoice === "qwen-portal" ||
authChoice === "minimax-global-oauth" ||
authChoice === "minimax-cn-oauth"

View File

@ -1,58 +0,0 @@
import type { OpenClawConfig } from "../config/config.js";
import type { AgentModelListConfig } from "../config/types.js";
export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.4";
// Decide whether onboarding should switch the default model over to the
// openai-codex default. Unset/blank models and plain OpenAI models are
// upgraded; anything already on openai-codex, and any other provider's
// model, is left untouched.
function shouldSetOpenAICodexModel(model?: string): boolean {
  const normalized = (model ?? "").trim().toLowerCase();
  if (normalized.length === 0) {
    // No model configured yet — safe to apply the codex default.
    return true;
  }
  if (normalized.startsWith("openai-codex/")) {
    // Already on a codex model; do not clobber.
    return false;
  }
  return (
    normalized.startsWith("openai/") || normalized === "gpt" || normalized === "gpt-mini"
  );
}
// Extract the primary model id from either accepted shape of the agent
// model config: a bare string, or an object with a string `primary` field.
// Returns undefined when neither shape yields a string.
function resolvePrimaryModel(model?: AgentModelListConfig | string): string | undefined {
  if (typeof model === "string") {
    return model;
  }
  const primary = model?.primary;
  return typeof primary === "string" ? primary : undefined;
}
/**
 * Apply the openai-codex default model to the config when the current
 * agents.defaults model is unset or a plain OpenAI model.
 *
 * Returns the (possibly new) config plus a `changed` flag; the input
 * config object is never mutated. Object-form model configs keep their
 * other fields and only have `primary` rewritten.
 */
export function applyOpenAICodexModelDefault(cfg: OpenClawConfig): {
  next: OpenClawConfig;
  changed: boolean;
} {
  const primary = resolvePrimaryModel(cfg.agents?.defaults?.model);
  if (!shouldSetOpenAICodexModel(primary)) {
    return { next: cfg, changed: false };
  }
  const existing = cfg.agents?.defaults?.model;
  // Preserve any extra fields (fallbacks etc.) on an object-form model.
  const model =
    existing && typeof existing === "object"
      ? { ...existing, primary: OPENAI_CODEX_DEFAULT_MODEL }
      : { primary: OPENAI_CODEX_DEFAULT_MODEL };
  return {
    next: {
      ...cfg,
      agents: {
        ...cfg.agents,
        defaults: {
          ...cfg.agents?.defaults,
          model,
        },
      },
    },
    changed: true,
  };
}

View File

@ -6,10 +6,6 @@ import {
applyGoogleGeminiModelDefault,
GOOGLE_GEMINI_DEFAULT_MODEL,
} from "./google-gemini-model-default.js";
import {
applyOpenAICodexModelDefault,
OPENAI_CODEX_DEFAULT_MODEL,
} from "./openai-codex-model-default.js";
import {
applyOpenAIConfig,
applyOpenAIProviderConfig,
@ -197,38 +193,6 @@ describe("applyOpenAIConfig", () => {
});
});
describe("applyOpenAICodexModelDefault", () => {
it("sets openai-codex default when model is unset", () => {
const cfg: OpenClawConfig = { agents: { defaults: {} } };
const applied = applyOpenAICodexModelDefault(cfg);
expectPrimaryModelChanged(applied, OPENAI_CODEX_DEFAULT_MODEL);
});
it("sets openai-codex default when model is openai/*", () => {
const cfg: OpenClawConfig = {
agents: { defaults: { model: { primary: OPENAI_DEFAULT_MODEL } } },
};
const applied = applyOpenAICodexModelDefault(cfg);
expectPrimaryModelChanged(applied, OPENAI_CODEX_DEFAULT_MODEL);
});
it("does not override openai-codex/*", () => {
const cfg: OpenClawConfig = {
agents: { defaults: { model: { primary: OPENAI_CODEX_DEFAULT_MODEL } } },
};
const applied = applyOpenAICodexModelDefault(cfg);
expectConfigUnchanged(applied, cfg);
});
it("does not override non-openai models", () => {
const cfg: OpenClawConfig = {
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
};
const applied = applyOpenAICodexModelDefault(cfg);
expectConfigUnchanged(applied, cfg);
});
});
describe("applyOpencodeZenModelDefault", () => {
it("no-ops when already legacy opencode-zen default", () => {
const cfg = {

View File

@ -0,0 +1,68 @@
import { normalizeProviderId } from "../agents/model-selection.js";
import { formatCliCommand } from "../cli/command-format.js";
import type { OpenClawConfig } from "../config/config.js";
import { resolvePluginProviders } from "../plugins/providers.js";
// True when the candidate provider's canonical id — or any of its
// aliases — normalizes to the same id as the requested provider.
function matchesProviderId(
  candidate: { id: string; aliases?: string[] | readonly string[] },
  providerId: string,
): boolean {
  const target = normalizeProviderId(providerId);
  if (!target) {
    // An unrecognized/empty provider id can never match anything.
    return false;
  }
  const candidateIds = [candidate.id, ...(candidate.aliases ?? [])];
  return candidateIds.some((id) => normalizeProviderId(id) === target);
}
/**
 * Resolve the `openclaw models auth login` command for a provider, or
 * undefined when no matching plugin provider exists or the provider
 * declares no auth methods.
 *
 * Matching honors provider aliases; the emitted command always uses the
 * provider's canonical id.
 */
export function resolveProviderAuthLoginCommand(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
}): string | undefined {
  const providers = resolvePluginProviders({
    config: params.config,
    workspaceDir: params.workspaceDir,
    env: params.env,
    bundledProviderAllowlistCompat: true,
    bundledProviderVitestCompat: true,
  });
  const match = providers.find((candidate) => matchesProviderId(candidate, params.provider));
  if (!match || match.auth.length === 0) {
    return undefined;
  }
  return formatCliCommand(`openclaw models auth login --provider ${match.id}`);
}
/**
 * Build a one-sentence hint telling the user how to restore auth for a
 * provider: the provider's plugin login command (when one exists),
 * `openclaw configure` (unless suppressed), and optionally an env-var
 * suggestion.
 *
 * The sentence always leads with `Run \`...\`` whenever its first element
 * is a command. Previously the "Run" verb was only attached to the plugin
 * login command, so providers without one produced verb-less hints like
 * "`openclaw configure` or set an API key env var." — which also broke
 * call sites that rewrite the leading verb via `.replace(/^Run /, ...)`.
 */
export function buildProviderAuthRecoveryHint(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  includeConfigure?: boolean;
  includeEnvVar?: boolean;
}): string {
  const loginCommand = resolveProviderAuthLoginCommand(params);
  const commandParts: string[] = [];
  if (loginCommand) {
    commandParts.push(`\`${loginCommand}\``);
  }
  if (params.includeConfigure !== false) {
    commandParts.push(`\`${formatCliCommand("openclaw configure")}\``);
  }
  const parts = [...commandParts];
  if (params.includeEnvVar) {
    parts.push("set an API key env var");
  }
  if (parts.length === 0) {
    return `Run \`${formatCliCommand("openclaw configure")}\`.`;
  }
  // Prefix "Run" onto the leading part whenever it is a command, so the
  // hint reads as an imperative regardless of which commands are present.
  if (commandParts.length > 0) {
    parts[0] = `Run ${parts[0]}`;
  }
  if (parts.length === 1) {
    return `${parts[0]}.`;
  }
  if (parts.length === 2) {
    return `${parts[0]} or ${parts[1]}.`;
  }
  return `${parts[0]}, ${parts[1]}, or ${parts[2]}.`;
}

View File

@ -26,6 +26,7 @@ import VENICE_MANIFEST from "../../extensions/venice/openclaw.plugin.json" with
import VERCEL_AI_GATEWAY_MANIFEST from "../../extensions/vercel-ai-gateway/openclaw.plugin.json" with { type: "json" };
import VLLM_MANIFEST from "../../extensions/vllm/openclaw.plugin.json" with { type: "json" };
import VOLCENGINE_MANIFEST from "../../extensions/volcengine/openclaw.plugin.json" with { type: "json" };
import XAI_MANIFEST from "../../extensions/xai/openclaw.plugin.json" with { type: "json" };
import XIAOMI_MANIFEST from "../../extensions/xiaomi/openclaw.plugin.json" with { type: "json" };
import ZAI_MANIFEST from "../../extensions/zai/openclaw.plugin.json" with { type: "json" };
@ -86,6 +87,7 @@ export const BUNDLED_PROVIDER_AUTH_ENV_VAR_CANDIDATES = collectBundledProviderAu
VERCEL_AI_GATEWAY_MANIFEST,
VLLM_MANIFEST,
VOLCENGINE_MANIFEST,
XAI_MANIFEST,
XIAOMI_MANIFEST,
ZAI_MANIFEST,
]);

View File

@ -54,6 +54,7 @@ export const BUNDLED_ENABLED_BY_DEFAULT = new Set<string>([
"vercel-ai-gateway",
"vllm",
"volcengine",
"xai",
"xiaomi",
"zai",
]);

View File

@ -118,6 +118,23 @@ export type ProviderAuthContext = {
workspaceDir?: string;
prompter: WizardPrompter;
runtime: RuntimeEnv;
/**
* Onboarding secret persistence preference.
*
* Interactive wizard flows set this when the caller explicitly requested
* plaintext or env/file/exec ref storage. Ad-hoc `models auth login` flows
* usually leave it undefined.
*/
secretInputMode?: OnboardOptions["secretInputMode"];
/**
* Whether the provider auth flow should offer the onboarding secret-storage
* mode picker when `secretInputMode` is unset.
*
* This is true for onboarding/configure flows and false for direct
* `models auth` commands, which should keep a tighter, provider-owned prompt
* surface.
*/
allowSecretRefPrompt?: boolean;
isRemote: boolean;
openUrl: (url: string) => Promise<void>;
oauth: {

View File

@ -2,7 +2,6 @@ import { BUNDLED_PROVIDER_AUTH_ENV_VAR_CANDIDATES } from "../plugins/bundled-pro
const CORE_PROVIDER_AUTH_ENV_VAR_CANDIDATES = {
chutes: ["CHUTES_OAUTH_TOKEN", "CHUTES_API_KEY"],
google: ["GEMINI_API_KEY"],
voyage: ["VOYAGE_API_KEY"],
groq: ["GROQ_API_KEY"],
deepgram: ["DEEPGRAM_API_KEY"],
@ -25,15 +24,15 @@ export const PROVIDER_AUTH_ENV_VAR_CANDIDATES: Record<string, readonly string[]>
/**
* Provider env vars used for onboarding/default secret refs and broad secret
* scrubbing. This can include non-model providers and may intentionally choose
* a different preferred first env var than auth resolution.
* a different preferred first env var than auth resolution. Keep the
* anthropic override in core so generic onboarding still prefers API keys over
* OAuth tokens when both are present.
*/
export const PROVIDER_ENV_VARS: Record<string, readonly string[]> = {
...PROVIDER_AUTH_ENV_VAR_CANDIDATES,
anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_OAUTH_TOKEN"],
chutes: ["CHUTES_API_KEY", "CHUTES_OAUTH_TOKEN"],
google: ["GEMINI_API_KEY"],
"minimax-cn": ["MINIMAX_API_KEY"],
xai: ["XAI_API_KEY"],
};
const EXTRA_PROVIDER_AUTH_ENV_VARS = ["MINIMAX_CODE_PLAN_KEY"] as const;