refactor: move plugin sdk setup helpers out of commands

Peter Steinberger 2026-03-16 22:11:35 -07:00
parent da34f81ce2
commit 880bc969f9
21 changed files with 1532 additions and 1373 deletions
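Each moved module leaves a one-line re-export shim at its old commands/ path (the surviving line of each deleted file below), so existing imports keep resolving while the implementation now lives under plugins/. The Ollama module, for example, collapses to this shim:

// commands/ollama-setup.ts after this commit: a pure re-export shim
export * from "../plugins/provider-ollama-setup.js";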

View File

@@ -1,531 +1 @@
import { upsertAuthProfileWithLock } from "../agents/auth-profiles/upsert-with-lock.js";
import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-defaults.js";
import {
buildOllamaModelDefinition,
enrichOllamaModelsWithContext,
fetchOllamaModels,
resolveOllamaApiBase,
type OllamaModelWithContext,
} from "../agents/ollama-models.js";
import type { OpenClawConfig } from "../config/config.js";
import { applyAgentDefaultModelPrimary } from "../plugins/provider-onboarding-config.js";
import type { RuntimeEnv } from "../runtime.js";
import { WizardCancelledError, type WizardPrompter } from "../wizard/prompts.js";
import { isRemoteEnvironment } from "./oauth-env.js";
import { openUrl } from "./onboard-helpers.js";
import type { OnboardMode, OnboardOptions } from "./onboard-types.js";
export { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-defaults.js";
export const OLLAMA_DEFAULT_MODEL = "glm-4.7-flash";
const OLLAMA_SUGGESTED_MODELS_LOCAL = ["glm-4.7-flash"];
const OLLAMA_SUGGESTED_MODELS_CLOUD = ["kimi-k2.5:cloud", "minimax-m2.5:cloud", "glm-5:cloud"];
function normalizeOllamaModelName(value: string | undefined): string | undefined {
const trimmed = value?.trim();
if (!trimmed) {
return undefined;
}
if (trimmed.toLowerCase().startsWith("ollama/")) {
const withoutPrefix = trimmed.slice("ollama/".length).trim();
return withoutPrefix || undefined;
}
return trimmed;
}
function isOllamaCloudModel(modelName: string | undefined): boolean {
return Boolean(modelName?.trim().toLowerCase().endsWith(":cloud"));
}
function formatOllamaPullStatus(status: string): { text: string; hidePercent: boolean } {
const trimmed = status.trim();
const partStatusMatch = trimmed.match(/^([a-z-]+)\s+(?:sha256:)?[a-f0-9]{8,}$/i);
if (partStatusMatch) {
return { text: `${partStatusMatch[1]} part`, hidePercent: false };
}
if (/^verifying\b.*\bdigest\b/i.test(trimmed)) {
return { text: "verifying digest", hidePercent: true };
}
return { text: trimmed, hidePercent: false };
}
type OllamaCloudAuthResult = {
signedIn: boolean;
signinUrl?: string;
};
/** Check if the user is signed in to Ollama cloud via /api/me. */
async function checkOllamaCloudAuth(baseUrl: string): Promise<OllamaCloudAuthResult> {
try {
const apiBase = resolveOllamaApiBase(baseUrl);
const response = await fetch(`${apiBase}/api/me`, {
method: "POST",
signal: AbortSignal.timeout(5000),
});
if (response.status === 401) {
// 401 body contains { error, signin_url }
const data = (await response.json()) as { signin_url?: string };
return { signedIn: false, signinUrl: data.signin_url };
}
if (!response.ok) {
return { signedIn: false };
}
return { signedIn: true };
} catch {
// /api/me not supported or unreachable — fail closed so cloud mode
// doesn't silently skip auth; the caller handles the fallback.
return { signedIn: false };
}
}
type OllamaPullChunk = {
status?: string;
total?: number;
completed?: number;
error?: string;
};
type OllamaPullFailureKind = "http" | "no-body" | "chunk-error" | "network";
type OllamaPullResult =
| { ok: true }
| {
ok: false;
kind: OllamaPullFailureKind;
message: string;
};
async function pullOllamaModelCore(params: {
baseUrl: string;
modelName: string;
onStatus?: (status: string, percent: number | null) => void;
}): Promise<OllamaPullResult> {
const { onStatus } = params;
const baseUrl = resolveOllamaApiBase(params.baseUrl);
const modelName = normalizeOllamaModelName(params.modelName) ?? params.modelName.trim();
try {
const response = await fetch(`${baseUrl}/api/pull`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ name: modelName }),
});
if (!response.ok) {
return {
ok: false,
kind: "http",
message: `Failed to download ${modelName} (HTTP ${response.status})`,
};
}
if (!response.body) {
return {
ok: false,
kind: "no-body",
message: `Failed to download ${modelName} (no response body)`,
};
}
const reader = response.body.getReader();
const decoder = new TextDecoder();
let buffer = "";
const layers = new Map<string, { total: number; completed: number }>();
const parseLine = (line: string): OllamaPullResult => {
const trimmed = line.trim();
if (!trimmed) {
return { ok: true };
}
try {
const chunk = JSON.parse(trimmed) as OllamaPullChunk;
if (chunk.error) {
return {
ok: false,
kind: "chunk-error",
message: `Download failed: ${chunk.error}`,
};
}
if (!chunk.status) {
return { ok: true };
}
if (chunk.total && chunk.completed !== undefined) {
layers.set(chunk.status, { total: chunk.total, completed: chunk.completed });
let totalSum = 0;
let completedSum = 0;
for (const layer of layers.values()) {
totalSum += layer.total;
completedSum += layer.completed;
}
const percent = totalSum > 0 ? Math.round((completedSum / totalSum) * 100) : null;
onStatus?.(chunk.status, percent);
} else {
onStatus?.(chunk.status, null);
}
} catch {
// Ignore malformed lines from streaming output.
}
return { ok: true };
};
for (;;) {
const { done, value } = await reader.read();
if (done) {
break;
}
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split("\n");
buffer = lines.pop() ?? "";
for (const line of lines) {
const parsed = parseLine(line);
if (!parsed.ok) {
return parsed;
}
}
}
const trailing = buffer.trim();
if (trailing) {
const parsed = parseLine(trailing);
if (!parsed.ok) {
return parsed;
}
}
return { ok: true };
} catch (err) {
const reason = err instanceof Error ? err.message : String(err);
return {
ok: false,
kind: "network",
message: `Failed to download ${modelName}: ${reason}`,
};
}
}
/** Pull a model from Ollama, streaming progress updates. */
async function pullOllamaModel(
baseUrl: string,
modelName: string,
prompter: WizardPrompter,
): Promise<boolean> {
const spinner = prompter.progress(`Downloading ${modelName}...`);
const result = await pullOllamaModelCore({
baseUrl,
modelName,
onStatus: (status, percent) => {
const displayStatus = formatOllamaPullStatus(status);
if (displayStatus.hidePercent) {
spinner.update(`Downloading ${modelName} - ${displayStatus.text}`);
} else {
spinner.update(`Downloading ${modelName} - ${displayStatus.text} - ${percent ?? 0}%`);
}
},
});
if (!result.ok) {
spinner.stop(result.message);
return false;
}
spinner.stop(`Downloaded ${modelName}`);
return true;
}
async function pullOllamaModelNonInteractive(
baseUrl: string,
modelName: string,
runtime: RuntimeEnv,
): Promise<boolean> {
runtime.log(`Downloading ${modelName}...`);
const result = await pullOllamaModelCore({ baseUrl, modelName });
if (!result.ok) {
runtime.error(result.message);
return false;
}
runtime.log(`Downloaded ${modelName}`);
return true;
}
function buildOllamaModelsConfig(
modelNames: string[],
discoveredModelsByName?: Map<string, OllamaModelWithContext>,
) {
return modelNames.map((name) =>
buildOllamaModelDefinition(name, discoveredModelsByName?.get(name)?.contextWindow),
);
}
function applyOllamaProviderConfig(
cfg: OpenClawConfig,
baseUrl: string,
modelNames: string[],
discoveredModelsByName?: Map<string, OllamaModelWithContext>,
): OpenClawConfig {
return {
...cfg,
models: {
...cfg.models,
mode: cfg.models?.mode ?? "merge",
providers: {
...cfg.models?.providers,
ollama: {
baseUrl,
api: "ollama",
apiKey: "OLLAMA_API_KEY", // pragma: allowlist secret
models: buildOllamaModelsConfig(modelNames, discoveredModelsByName),
},
},
},
};
}
async function storeOllamaCredential(agentDir?: string): Promise<void> {
await upsertAuthProfileWithLock({
profileId: "ollama:default",
credential: { type: "api_key", provider: "ollama", key: "ollama-local" },
agentDir,
});
}
/**
* Interactive: prompt for base URL, discover models, configure provider.
* Model selection is handled by the standard model picker downstream.
*/
export async function promptAndConfigureOllama(params: {
cfg: OpenClawConfig;
prompter: WizardPrompter;
}): Promise<{ config: OpenClawConfig; defaultModelId: string }> {
const { prompter } = params;
// 1. Prompt base URL
const baseUrlRaw = await prompter.text({
message: "Ollama base URL",
initialValue: OLLAMA_DEFAULT_BASE_URL,
placeholder: OLLAMA_DEFAULT_BASE_URL,
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const configuredBaseUrl = String(baseUrlRaw ?? "")
.trim()
.replace(/\/+$/, "");
const baseUrl = resolveOllamaApiBase(configuredBaseUrl);
// 2. Check reachability
const { reachable, models } = await fetchOllamaModels(baseUrl);
if (!reachable) {
await prompter.note(
[
`Ollama could not be reached at ${baseUrl}.`,
"Download it at https://ollama.com/download",
"",
"Start Ollama and re-run setup.",
].join("\n"),
"Ollama",
);
throw new WizardCancelledError("Ollama not reachable");
}
const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50));
const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
const modelNames = models.map((m) => m.name);
// 3. Mode selection
const mode = (await prompter.select({
message: "Ollama mode",
options: [
{ value: "remote", label: "Cloud + Local", hint: "Ollama cloud models + local models" },
{ value: "local", label: "Local", hint: "Local models only" },
],
})) as OnboardMode;
// 4. Cloud auth — check /api/me upfront for remote (cloud+local) mode
let cloudAuthVerified = false;
if (mode === "remote") {
const authResult = await checkOllamaCloudAuth(baseUrl);
if (!authResult.signedIn) {
if (authResult.signinUrl) {
if (!isRemoteEnvironment()) {
await openUrl(authResult.signinUrl);
}
await prompter.note(
["Sign in to Ollama Cloud:", authResult.signinUrl].join("\n"),
"Ollama Cloud",
);
const confirmed = await prompter.confirm({
message: "Have you signed in?",
});
if (!confirmed) {
throw new WizardCancelledError("Ollama cloud sign-in cancelled");
}
// Re-check after user claims sign-in
const recheck = await checkOllamaCloudAuth(baseUrl);
if (!recheck.signedIn) {
throw new WizardCancelledError("Ollama cloud sign-in required");
}
cloudAuthVerified = true;
} else {
// No signin URL available (older server, unreachable /api/me, or custom gateway).
await prompter.note(
[
"Could not verify Ollama Cloud authentication.",
"Cloud models may not work until you sign in at https://ollama.com.",
].join("\n"),
"Ollama Cloud",
);
const continueAnyway = await prompter.confirm({
message: "Continue without cloud auth?",
});
if (!continueAnyway) {
throw new WizardCancelledError("Ollama cloud auth could not be verified");
}
// Cloud auth unverified — fall back to local defaults so the model
// picker doesn't steer toward cloud models that may fail.
}
} else {
cloudAuthVerified = true;
}
}
// 5. Model ordering — suggested models first.
// Use cloud defaults only when auth was actually verified; otherwise fall
// back to local defaults so the user isn't steered toward cloud models
// that may fail at runtime.
const suggestedModels =
mode === "local" || !cloudAuthVerified
? OLLAMA_SUGGESTED_MODELS_LOCAL
: OLLAMA_SUGGESTED_MODELS_CLOUD;
const orderedModelNames = [
...suggestedModels,
...modelNames.filter((name) => !suggestedModels.includes(name)),
];
const defaultModelId = suggestedModels[0] ?? OLLAMA_DEFAULT_MODEL;
const config = applyOllamaProviderConfig(
params.cfg,
baseUrl,
orderedModelNames,
discoveredModelsByName,
);
return { config, defaultModelId };
}
/** Non-interactive: auto-discover models and configure provider. */
export async function configureOllamaNonInteractive(params: {
nextConfig: OpenClawConfig;
opts: OnboardOptions;
runtime: RuntimeEnv;
}): Promise<OpenClawConfig> {
const { opts, runtime } = params;
const configuredBaseUrl = (opts.customBaseUrl?.trim() || OLLAMA_DEFAULT_BASE_URL).replace(
/\/+$/,
"",
);
const baseUrl = resolveOllamaApiBase(configuredBaseUrl);
const { reachable, models } = await fetchOllamaModels(baseUrl);
const explicitModel = normalizeOllamaModelName(opts.customModelId);
if (!reachable) {
runtime.error(
[
`Ollama could not be reached at ${baseUrl}.`,
"Download it at https://ollama.com/download",
].join("\n"),
);
runtime.exit(1);
return params.nextConfig;
}
await storeOllamaCredential();
const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50));
const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
const modelNames = models.map((m) => m.name);
// Apply local suggested model ordering.
const suggestedModels = OLLAMA_SUGGESTED_MODELS_LOCAL;
const orderedModelNames = [
...suggestedModels,
...modelNames.filter((name) => !suggestedModels.includes(name)),
];
const requestedDefaultModelId = explicitModel ?? suggestedModels[0];
let pulledRequestedModel = false;
const availableModelNames = new Set(modelNames);
const requestedCloudModel = isOllamaCloudModel(requestedDefaultModelId);
if (requestedCloudModel) {
availableModelNames.add(requestedDefaultModelId);
}
// Pull if model not in discovered list and Ollama is reachable
if (!requestedCloudModel && !modelNames.includes(requestedDefaultModelId)) {
pulledRequestedModel = await pullOllamaModelNonInteractive(
baseUrl,
requestedDefaultModelId,
runtime,
);
if (pulledRequestedModel) {
availableModelNames.add(requestedDefaultModelId);
}
}
let allModelNames = orderedModelNames;
let defaultModelId = requestedDefaultModelId;
if (
(pulledRequestedModel || requestedCloudModel) &&
!allModelNames.includes(requestedDefaultModelId)
) {
allModelNames = [...allModelNames, requestedDefaultModelId];
}
if (!availableModelNames.has(requestedDefaultModelId)) {
if (availableModelNames.size > 0) {
const firstAvailableModel =
allModelNames.find((name) => availableModelNames.has(name)) ??
Array.from(availableModelNames)[0];
defaultModelId = firstAvailableModel;
runtime.log(
`Ollama model ${requestedDefaultModelId} was not available; using ${defaultModelId} instead.`,
);
} else {
runtime.error(
[
`No Ollama models are available at ${baseUrl}.`,
"Pull a model first, then re-run setup.",
].join("\n"),
);
runtime.exit(1);
return params.nextConfig;
}
}
const config = applyOllamaProviderConfig(
params.nextConfig,
baseUrl,
allModelNames,
discoveredModelsByName,
);
const modelRef = `ollama/${defaultModelId}`;
runtime.log(`Default Ollama model: ${defaultModelId}`);
return applyAgentDefaultModelPrimary(config, modelRef);
}
/** Pull the configured default Ollama model if it isn't already available locally. */
export async function ensureOllamaModelPulled(params: {
config: OpenClawConfig;
prompter: WizardPrompter;
}): Promise<void> {
const modelCfg = params.config.agents?.defaults?.model;
const modelId = typeof modelCfg === "string" ? modelCfg : modelCfg?.primary;
if (!modelId?.startsWith("ollama/")) {
return;
}
const baseUrl = params.config.models?.providers?.ollama?.baseUrl ?? OLLAMA_DEFAULT_BASE_URL;
const modelName = modelId.slice("ollama/".length);
if (isOllamaCloudModel(modelName)) {
return;
}
const { models } = await fetchOllamaModels(baseUrl);
if (models.some((m) => m.name === modelName)) {
return;
}
const pulled = await pullOllamaModel(baseUrl, modelName, params.prompter);
if (!pulled) {
throw new WizardCancelledError("Failed to download selected Ollama model");
}
}
export * from "../plugins/provider-ollama-setup.js";
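A minimal caller sketch for the Ollama helpers above, assuming cfg (OpenClawConfig) and prompter (WizardPrompter) are supplied by the onboarding wizard; the default-model wiring mirrors what the non-interactive path does internally:

// Interactive flow: configure the provider, set the default model, ensure it is pulled.
const { config, defaultModelId } = await promptAndConfigureOllama({ cfg, prompter });
const withDefault = applyAgentDefaultModelPrimary(config, `ollama/${defaultModelId}`);
await ensureOllamaModelPulled({ config: withDefault, prompter });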

View File

@@ -1,304 +1 @@
import type { ApiKeyCredential, AuthProfileCredential } from "../agents/auth-profiles/types.js";
import { upsertAuthProfileWithLock } from "../agents/auth-profiles/upsert-with-lock.js";
import {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../agents/self-hosted-provider-defaults.js";
import type { OpenClawConfig } from "../config/config.js";
import { applyAuthProfileConfig } from "../plugins/provider-auth-helpers.js";
import type {
ProviderDiscoveryContext,
ProviderAuthResult,
ProviderAuthMethodNonInteractiveContext,
ProviderNonInteractiveApiKeyResult,
} from "../plugins/types.js";
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
import type { WizardPrompter } from "../wizard/prompts.js";
export {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../agents/self-hosted-provider-defaults.js";
export function applyProviderDefaultModel(cfg: OpenClawConfig, modelRef: string): OpenClawConfig {
const existingModel = cfg.agents?.defaults?.model;
const fallbacks =
existingModel && typeof existingModel === "object" && "fallbacks" in existingModel
? (existingModel as { fallbacks?: string[] }).fallbacks
: undefined;
return {
...cfg,
agents: {
...cfg.agents,
defaults: {
...cfg.agents?.defaults,
model: {
...(fallbacks ? { fallbacks } : undefined),
primary: modelRef,
},
},
},
};
}
function buildOpenAICompatibleSelfHostedProviderConfig(params: {
cfg: OpenClawConfig;
providerId: string;
baseUrl: string;
providerApiKey: string;
modelId: string;
input?: Array<"text" | "image">;
reasoning?: boolean;
contextWindow?: number;
maxTokens?: number;
}): { config: OpenClawConfig; modelId: string; modelRef: string; profileId: string } {
const modelRef = `${params.providerId}/${params.modelId}`;
const profileId = `${params.providerId}:default`;
return {
config: {
...params.cfg,
models: {
...params.cfg.models,
mode: params.cfg.models?.mode ?? "merge",
providers: {
...params.cfg.models?.providers,
[params.providerId]: {
baseUrl: params.baseUrl,
api: "openai-completions",
apiKey: params.providerApiKey,
models: [
{
id: params.modelId,
name: params.modelId,
reasoning: params.reasoning ?? false,
input: params.input ?? ["text"],
cost: SELF_HOSTED_DEFAULT_COST,
contextWindow: params.contextWindow ?? SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
maxTokens: params.maxTokens ?? SELF_HOSTED_DEFAULT_MAX_TOKENS,
},
],
},
},
},
},
modelId: params.modelId,
modelRef,
profileId,
};
}
type OpenAICompatibleSelfHostedProviderSetupParams = {
cfg: OpenClawConfig;
prompter: WizardPrompter;
providerId: string;
providerLabel: string;
defaultBaseUrl: string;
defaultApiKeyEnvVar: string;
modelPlaceholder: string;
input?: Array<"text" | "image">;
reasoning?: boolean;
contextWindow?: number;
maxTokens?: number;
};
type OpenAICompatibleSelfHostedProviderPromptResult = {
config: OpenClawConfig;
credential: AuthProfileCredential;
modelId: string;
modelRef: string;
profileId: string;
};
function buildSelfHostedProviderAuthResult(
result: OpenAICompatibleSelfHostedProviderPromptResult,
): ProviderAuthResult {
return {
profiles: [
{
profileId: result.profileId,
credential: result.credential,
},
],
configPatch: result.config,
defaultModel: result.modelRef,
};
}
export async function promptAndConfigureOpenAICompatibleSelfHostedProvider(
params: OpenAICompatibleSelfHostedProviderSetupParams,
): Promise<OpenAICompatibleSelfHostedProviderPromptResult> {
const baseUrlRaw = await params.prompter.text({
message: `${params.providerLabel} base URL`,
initialValue: params.defaultBaseUrl,
placeholder: params.defaultBaseUrl,
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const apiKeyRaw = await params.prompter.text({
message: `${params.providerLabel} API key`,
placeholder: "sk-... (or any non-empty string)",
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const modelIdRaw = await params.prompter.text({
message: `${params.providerLabel} model`,
placeholder: params.modelPlaceholder,
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const baseUrl = String(baseUrlRaw ?? "")
.trim()
.replace(/\/+$/, "");
const apiKey = String(apiKeyRaw ?? "").trim();
const modelId = String(modelIdRaw ?? "").trim();
const credential: AuthProfileCredential = {
type: "api_key",
provider: params.providerId,
key: apiKey,
};
const configured = buildOpenAICompatibleSelfHostedProviderConfig({
cfg: params.cfg,
providerId: params.providerId,
baseUrl,
providerApiKey: params.defaultApiKeyEnvVar,
modelId,
input: params.input,
reasoning: params.reasoning,
contextWindow: params.contextWindow,
maxTokens: params.maxTokens,
});
return {
config: configured.config,
credential,
modelId: configured.modelId,
modelRef: configured.modelRef,
profileId: configured.profileId,
};
}
export async function promptAndConfigureOpenAICompatibleSelfHostedProviderAuth(
params: OpenAICompatibleSelfHostedProviderSetupParams,
): Promise<ProviderAuthResult> {
const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider(params);
return buildSelfHostedProviderAuthResult(result);
}
export async function discoverOpenAICompatibleSelfHostedProvider<
T extends Record<string, unknown>,
>(params: {
ctx: ProviderDiscoveryContext;
providerId: string;
buildProvider: (params: { apiKey?: string }) => Promise<T>;
}): Promise<{ provider: T & { apiKey: string } } | null> {
if (params.ctx.config.models?.providers?.[params.providerId]) {
return null;
}
const { apiKey, discoveryApiKey } = params.ctx.resolveProviderApiKey(params.providerId);
if (!apiKey) {
return null;
}
return {
provider: {
...(await params.buildProvider({ apiKey: discoveryApiKey })),
apiKey,
},
};
}
function buildMissingNonInteractiveModelIdMessage(params: {
authChoice: string;
providerLabel: string;
modelPlaceholder: string;
}): string {
return [
`Missing --custom-model-id for --auth-choice ${params.authChoice}.`,
`Pass the ${params.providerLabel} model id to use, for example ${params.modelPlaceholder}.`,
].join("\n");
}
function buildSelfHostedProviderCredential(params: {
ctx: ProviderAuthMethodNonInteractiveContext;
providerId: string;
resolved: ProviderNonInteractiveApiKeyResult;
}): ApiKeyCredential | null {
return params.ctx.toApiKeyCredential({
provider: params.providerId,
resolved: params.resolved,
});
}
export async function configureOpenAICompatibleSelfHostedProviderNonInteractive(params: {
ctx: ProviderAuthMethodNonInteractiveContext;
providerId: string;
providerLabel: string;
defaultBaseUrl: string;
defaultApiKeyEnvVar: string;
modelPlaceholder: string;
input?: Array<"text" | "image">;
reasoning?: boolean;
contextWindow?: number;
maxTokens?: number;
}): Promise<OpenClawConfig | null> {
const baseUrl = (
normalizeOptionalSecretInput(params.ctx.opts.customBaseUrl) ?? params.defaultBaseUrl
).replace(/\/+$/, "");
const modelId = normalizeOptionalSecretInput(params.ctx.opts.customModelId);
if (!modelId) {
params.ctx.runtime.error(
buildMissingNonInteractiveModelIdMessage({
authChoice: params.ctx.authChoice,
providerLabel: params.providerLabel,
modelPlaceholder: params.modelPlaceholder,
}),
);
params.ctx.runtime.exit(1);
return null;
}
const resolved = await params.ctx.resolveApiKey({
provider: params.providerId,
flagValue: normalizeOptionalSecretInput(params.ctx.opts.customApiKey),
flagName: "--custom-api-key",
envVar: params.defaultApiKeyEnvVar,
envVarName: params.defaultApiKeyEnvVar,
});
if (!resolved) {
return null;
}
const credential = buildSelfHostedProviderCredential({
ctx: params.ctx,
providerId: params.providerId,
resolved,
});
if (!credential) {
return null;
}
const configured = buildOpenAICompatibleSelfHostedProviderConfig({
cfg: params.ctx.config,
providerId: params.providerId,
baseUrl,
providerApiKey: params.defaultApiKeyEnvVar,
modelId,
input: params.input,
reasoning: params.reasoning,
contextWindow: params.contextWindow,
maxTokens: params.maxTokens,
});
await upsertAuthProfileWithLock({
profileId: configured.profileId,
credential,
agentDir: params.ctx.agentDir,
});
const withProfile = applyAuthProfileConfig(configured.config, {
profileId: configured.profileId,
provider: params.providerId,
mode: "api_key",
});
params.ctx.runtime.log(`Default ${params.providerLabel} model: ${modelId}`);
return applyProviderDefaultModel(withProfile, configured.modelRef);
}
export * from "../plugins/provider-self-hosted-setup.js";
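A hypothetical wiring sketch for the helper above; the provider id, label, base URL, and env var are illustrative placeholders, not values defined in this commit:

// Interactive path: prompts for base URL, API key, and model, then returns
// profiles, a config patch, and the default model ref.
const authResult = await promptAndConfigureOpenAICompatibleSelfHostedProviderAuth({
  cfg,
  prompter,
  providerId: "my-llm", // placeholder
  providerLabel: "My LLM", // placeholder
  defaultBaseUrl: "http://localhost:8000/v1", // placeholder
  defaultApiKeyEnvVar: "MY_LLM_API_KEY", // placeholder
  modelPlaceholder: "my-model-id", // placeholder
});
// authResult.configPatch holds the provider config; authResult.defaultModel is "my-llm/<model>".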

View File

@@ -1,302 +1 @@
import { createWriteStream } from "node:fs";
import fs from "node:fs/promises";
import { request } from "node:https";
import os from "node:os";
import path from "node:path";
import { pipeline } from "node:stream/promises";
import { extractArchive } from "../infra/archive.js";
import { resolveBrewExecutable } from "../infra/brew.js";
import { runCommandWithTimeout } from "../process/exec.js";
import type { RuntimeEnv } from "../runtime.js";
import { CONFIG_DIR } from "../utils.js";
export type ReleaseAsset = {
name?: string;
browser_download_url?: string;
};
export type NamedAsset = {
name: string;
browser_download_url: string;
};
type ReleaseResponse = {
tag_name?: string;
assets?: ReleaseAsset[];
};
export type SignalInstallResult = {
ok: boolean;
cliPath?: string;
version?: string;
error?: string;
};
/** @internal Exported for testing. */
export async function extractSignalCliArchive(
archivePath: string,
installRoot: string,
timeoutMs: number,
): Promise<void> {
await extractArchive({ archivePath, destDir: installRoot, timeoutMs });
}
/** @internal Exported for testing. */
export function looksLikeArchive(name: string): boolean {
return name.endsWith(".tar.gz") || name.endsWith(".tgz") || name.endsWith(".zip");
}
/**
* Pick a native release asset from the official GitHub releases.
*
* The official signal-cli releases only publish native (GraalVM) binaries for
* x86-64 Linux. On architectures where no native asset is available this
* returns `undefined` so the caller can fall back to a different install
* strategy (e.g. Homebrew).
*/
/** @internal Exported for testing. */
export function pickAsset(
assets: ReleaseAsset[],
platform: NodeJS.Platform,
arch: string,
): NamedAsset | undefined {
const withName = assets.filter((asset): asset is NamedAsset =>
Boolean(asset.name && asset.browser_download_url),
);
// Archives only, excluding signature files (.asc)
const archives = withName.filter((a) => looksLikeArchive(a.name.toLowerCase()));
const byName = (pattern: RegExp) =>
archives.find((asset) => pattern.test(asset.name.toLowerCase()));
if (platform === "linux") {
// The official "Linux-native" asset is an x86-64 GraalVM binary.
// On non-x64 architectures it will fail with "Exec format error",
// so only select it when the host architecture matches.
if (arch === "x64") {
return byName(/linux-native/) || byName(/linux/) || archives[0];
}
// No native release for this arch — caller should fall back.
return undefined;
}
if (platform === "darwin") {
return byName(/macos|osx|darwin/) || archives[0];
}
if (platform === "win32") {
return byName(/windows|win/) || archives[0];
}
return archives[0];
}
async function downloadToFile(url: string, dest: string, maxRedirects = 5): Promise<void> {
await new Promise<void>((resolve, reject) => {
const req = request(url, (res) => {
if (res.statusCode && res.statusCode >= 300 && res.statusCode < 400) {
const location = res.headers.location;
if (!location || maxRedirects <= 0) {
reject(new Error("Redirect loop or missing Location header"));
return;
}
const redirectUrl = new URL(location, url).href;
resolve(downloadToFile(redirectUrl, dest, maxRedirects - 1));
return;
}
if (!res.statusCode || res.statusCode >= 400) {
reject(new Error(`HTTP ${res.statusCode ?? "?"} downloading file`));
return;
}
const out = createWriteStream(dest);
pipeline(res, out).then(resolve).catch(reject);
});
req.on("error", reject);
req.end();
});
}
async function findSignalCliBinary(root: string): Promise<string | null> {
const candidates: string[] = [];
const enqueue = async (dir: string, depth: number) => {
if (depth > 3) {
return;
}
const entries = await fs.readdir(dir, { withFileTypes: true }).catch(() => []);
for (const entry of entries) {
const full = path.join(dir, entry.name);
if (entry.isDirectory()) {
await enqueue(full, depth + 1);
} else if (entry.isFile() && entry.name === "signal-cli") {
candidates.push(full);
}
}
};
await enqueue(root, 0);
return candidates[0] ?? null;
}
// ---------------------------------------------------------------------------
// Brew-based install (used on architectures without an official native build)
// ---------------------------------------------------------------------------
async function resolveBrewSignalCliPath(brewExe: string): Promise<string | null> {
try {
const result = await runCommandWithTimeout([brewExe, "--prefix", "signal-cli"], {
timeoutMs: 10_000,
});
if (result.code === 0 && result.stdout.trim()) {
const prefix = result.stdout.trim();
// Homebrew installs the wrapper script at <prefix>/bin/signal-cli
const candidate = path.join(prefix, "bin", "signal-cli");
try {
await fs.access(candidate);
return candidate;
} catch {
// Fall back to searching the prefix
return findSignalCliBinary(prefix);
}
}
} catch {
// ignore
}
return null;
}
async function installSignalCliViaBrew(runtime: RuntimeEnv): Promise<SignalInstallResult> {
const brewExe = resolveBrewExecutable();
if (!brewExe) {
return {
ok: false,
error:
`No native signal-cli build is available for ${process.arch}. ` +
"Install Homebrew (https://brew.sh) and try again, or install signal-cli manually.",
};
}
runtime.log(`Installing signal-cli via Homebrew (${brewExe})…`);
const result = await runCommandWithTimeout([brewExe, "install", "signal-cli"], {
timeoutMs: 15 * 60_000, // brew builds from source; can take a while
});
if (result.code !== 0) {
return {
ok: false,
error: `brew install signal-cli failed (exit ${result.code}): ${result.stderr.trim().slice(0, 200)}`,
};
}
const cliPath = await resolveBrewSignalCliPath(brewExe);
if (!cliPath) {
return {
ok: false,
error: "brew install succeeded but signal-cli binary was not found.",
};
}
// Extract version from the installed binary.
let version: string | undefined;
try {
const vResult = await runCommandWithTimeout([cliPath, "--version"], {
timeoutMs: 10_000,
});
// Output is typically "signal-cli 0.13.24"
version = vResult.stdout.trim().replace(/^signal-cli\s+/, "") || undefined;
} catch {
// non-critical; leave version undefined
}
return { ok: true, cliPath, version };
}
// ---------------------------------------------------------------------------
// Direct download install (used when an official native asset is available)
// ---------------------------------------------------------------------------
async function installSignalCliFromRelease(runtime: RuntimeEnv): Promise<SignalInstallResult> {
const apiUrl = "https://api.github.com/repos/AsamK/signal-cli/releases/latest";
const response = await fetch(apiUrl, {
headers: {
"User-Agent": "openclaw",
Accept: "application/vnd.github+json",
},
});
if (!response.ok) {
return {
ok: false,
error: `Failed to fetch release info (${response.status})`,
};
}
const payload = (await response.json()) as ReleaseResponse;
const version = payload.tag_name?.replace(/^v/, "") ?? "unknown";
const assets = payload.assets ?? [];
const asset = pickAsset(assets, process.platform, process.arch);
if (!asset) {
return {
ok: false,
error: "No compatible release asset found for this platform.",
};
}
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-signal-"));
const archivePath = path.join(tmpDir, asset.name);
runtime.log(`Downloading signal-cli ${version} (${asset.name})…`);
await downloadToFile(asset.browser_download_url, archivePath);
const installRoot = path.join(CONFIG_DIR, "tools", "signal-cli", version);
await fs.mkdir(installRoot, { recursive: true });
if (!looksLikeArchive(asset.name.toLowerCase())) {
return { ok: false, error: `Unsupported archive type: ${asset.name}` };
}
try {
await extractSignalCliArchive(archivePath, installRoot, 60_000);
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
return {
ok: false,
error: `Failed to extract ${asset.name}: ${message}`,
};
}
const cliPath = await findSignalCliBinary(installRoot);
if (!cliPath) {
return {
ok: false,
error: `signal-cli binary not found after extracting ${asset.name}`,
};
}
await fs.chmod(cliPath, 0o755).catch(() => {});
return { ok: true, cliPath, version };
}
// ---------------------------------------------------------------------------
// Public entry point
// ---------------------------------------------------------------------------
export async function installSignalCli(runtime: RuntimeEnv): Promise<SignalInstallResult> {
if (process.platform === "win32") {
return {
ok: false,
error: "Signal CLI auto-install is not supported on Windows yet.",
};
}
// The official signal-cli GitHub releases only ship a native binary for
// x86-64 Linux. On other architectures (arm64, armv7, etc.) we delegate
// to Homebrew which builds from source and bundles the JRE automatically.
const hasNativeRelease = process.platform !== "linux" || process.arch === "x64";
if (hasNativeRelease) {
return installSignalCliFromRelease(runtime);
}
return installSignalCliViaBrew(runtime);
}
export * from "../plugins/signal-cli-install.js";
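A short caller sketch for installSignalCli, assuming runtime is the ambient RuntimeEnv:

const result = await installSignalCli(runtime);
if (result.ok) {
  runtime.log(`signal-cli ${result.version ?? "?"} installed at ${result.cliPath}`);
} else {
  runtime.error(result.error ?? "signal-cli install failed");
}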

View File

@@ -1,42 +1 @@
import {
VLLM_DEFAULT_API_KEY_ENV_VAR,
VLLM_DEFAULT_BASE_URL,
VLLM_MODEL_PLACEHOLDER,
VLLM_PROVIDER_LABEL,
} from "../agents/vllm-defaults.js";
import type { OpenClawConfig } from "../config/config.js";
import type { WizardPrompter } from "../wizard/prompts.js";
import {
applyProviderDefaultModel,
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
promptAndConfigureOpenAICompatibleSelfHostedProvider,
} from "./self-hosted-provider-setup.js";
export { VLLM_DEFAULT_BASE_URL } from "../agents/vllm-defaults.js";
export const VLLM_DEFAULT_CONTEXT_WINDOW = SELF_HOSTED_DEFAULT_CONTEXT_WINDOW;
export const VLLM_DEFAULT_MAX_TOKENS = SELF_HOSTED_DEFAULT_MAX_TOKENS;
export const VLLM_DEFAULT_COST = SELF_HOSTED_DEFAULT_COST;
export async function promptAndConfigureVllm(params: {
cfg: OpenClawConfig;
prompter: WizardPrompter;
}): Promise<{ config: OpenClawConfig; modelId: string; modelRef: string }> {
const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider({
cfg: params.cfg,
prompter: params.prompter,
providerId: "vllm",
providerLabel: VLLM_PROVIDER_LABEL,
defaultBaseUrl: VLLM_DEFAULT_BASE_URL,
defaultApiKeyEnvVar: VLLM_DEFAULT_API_KEY_ENV_VAR,
modelPlaceholder: VLLM_MODEL_PLACEHOLDER,
});
return {
config: result.config,
modelId: result.modelId,
modelRef: result.modelRef,
};
}
export { applyProviderDefaultModel as applyVllmDefaultModel };
export * from "../plugins/provider-vllm-setup.js";
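A minimal sketch of the vLLM wrapper above, again assuming cfg and prompter come from the wizard:

const { config, modelRef } = await promptAndConfigureVllm({ cfg, prompter });
// applyVllmDefaultModel is the re-exported applyProviderDefaultModel.
const next = applyVllmDefaultModel(config, modelRef);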

View File

@@ -1,179 +1 @@
import {
ZAI_CN_BASE_URL,
ZAI_CODING_CN_BASE_URL,
ZAI_CODING_GLOBAL_BASE_URL,
ZAI_GLOBAL_BASE_URL,
} from "../../extensions/zai/model-definitions.js";
import { fetchWithTimeout } from "../utils/fetch-timeout.js";
export type ZaiEndpointId = "global" | "cn" | "coding-global" | "coding-cn";
export type ZaiDetectedEndpoint = {
endpoint: ZaiEndpointId;
/** Provider baseUrl to store in config. */
baseUrl: string;
/** Recommended default model id for that endpoint. */
modelId: string;
/** Human-readable note explaining the choice. */
note: string;
};
type ProbeResult =
| { ok: true }
| {
ok: false;
status?: number;
errorCode?: string;
errorMessage?: string;
};
async function probeZaiChatCompletions(params: {
baseUrl: string;
apiKey: string;
modelId: string;
timeoutMs: number;
fetchFn?: typeof fetch;
}): Promise<ProbeResult> {
try {
const res = await fetchWithTimeout(
`${params.baseUrl}/chat/completions`,
{
method: "POST",
headers: {
authorization: `Bearer ${params.apiKey}`,
"content-type": "application/json",
},
body: JSON.stringify({
model: params.modelId,
stream: false,
max_tokens: 1,
messages: [{ role: "user", content: "ping" }],
}),
},
params.timeoutMs,
params.fetchFn,
);
if (res.ok) {
return { ok: true };
}
let errorCode: string | undefined;
let errorMessage: string | undefined;
try {
const json = (await res.json()) as {
error?: { code?: unknown; message?: unknown };
msg?: unknown;
message?: unknown;
};
const code = json?.error?.code;
const msg = json?.error?.message ?? json?.msg ?? json?.message;
if (typeof code === "string") {
errorCode = code;
} else if (typeof code === "number") {
errorCode = String(code);
}
if (typeof msg === "string") {
errorMessage = msg;
}
} catch {
// ignore
}
return { ok: false, status: res.status, errorCode, errorMessage };
} catch {
return { ok: false };
}
}
export async function detectZaiEndpoint(params: {
apiKey: string;
endpoint?: ZaiEndpointId;
timeoutMs?: number;
fetchFn?: typeof fetch;
}): Promise<ZaiDetectedEndpoint | null> {
// Never auto-probe in vitest; it would create flaky network behavior.
if (process.env.VITEST && !params.fetchFn) {
return null;
}
const timeoutMs = params.timeoutMs ?? 5_000;
const probeCandidates = (() => {
const general = [
{
endpoint: "global" as const,
baseUrl: ZAI_GLOBAL_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on global endpoint.",
},
{
endpoint: "cn" as const,
baseUrl: ZAI_CN_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on cn endpoint.",
},
];
const codingGlm5 = [
{
endpoint: "coding-global" as const,
baseUrl: ZAI_CODING_GLOBAL_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on coding-global endpoint.",
},
{
endpoint: "coding-cn" as const,
baseUrl: ZAI_CODING_CN_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on coding-cn endpoint.",
},
];
const codingFallback = [
{
endpoint: "coding-global" as const,
baseUrl: ZAI_CODING_GLOBAL_BASE_URL,
modelId: "glm-4.7",
note: "Coding Plan endpoint verified, but this key/plan does not expose GLM-5 there. Defaulting to GLM-4.7.",
},
{
endpoint: "coding-cn" as const,
baseUrl: ZAI_CODING_CN_BASE_URL,
modelId: "glm-4.7",
note: "Coding Plan CN endpoint verified, but this key/plan does not expose GLM-5 there. Defaulting to GLM-4.7.",
},
];
switch (params.endpoint) {
case "global":
return general.filter((candidate) => candidate.endpoint === "global");
case "cn":
return general.filter((candidate) => candidate.endpoint === "cn");
case "coding-global":
return [
...codingGlm5.filter((candidate) => candidate.endpoint === "coding-global"),
...codingFallback.filter((candidate) => candidate.endpoint === "coding-global"),
];
case "coding-cn":
return [
...codingGlm5.filter((candidate) => candidate.endpoint === "coding-cn"),
...codingFallback.filter((candidate) => candidate.endpoint === "coding-cn"),
];
default:
return [...general, ...codingGlm5, ...codingFallback];
}
})();
for (const candidate of probeCandidates) {
const result = await probeZaiChatCompletions({
baseUrl: candidate.baseUrl,
apiKey: params.apiKey,
modelId: candidate.modelId,
timeoutMs,
fetchFn: params.fetchFn,
});
if (result.ok) {
return candidate;
}
}
return null;
}
export * from "../plugins/provider-zai-endpoint.js";
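A brief caller sketch for detectZaiEndpoint; apiKey is assumed to come from an earlier auth step:

const detected = await detectZaiEndpoint({ apiKey });
if (detected) {
  // Use detected.baseUrl and detected.modelId for the provider config; detected.note explains the pick.
} else {
  // No endpoint answered the probe; fall back to asking the user.
}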

View File

@@ -30,6 +30,8 @@ export {
setSetupChannelEnabled,
splitSetupEntries,
} from "../channels/plugins/setup-wizard-helpers.js";
export { detectBinary } from "../plugins/setup-binary.js";
export { installSignalCli } from "../plugins/signal-cli-install.js";
export { formatCliCommand } from "../cli/command-format.js";
export { formatDocsLink } from "../terminal/links.js";
export { hasConfiguredSecretInput } from "../config/types.secrets.js";

View File

@@ -24,5 +24,6 @@ export * from "../agents/tools/web-shared.js";
export * from "../agents/tools/discord-actions-moderation-shared.js";
export * from "../agents/tools/web-fetch-utils.js";
export * from "../agents/vllm-defaults.js";
// Intentional public runtime surface: channel plugins use ingress agent helpers directly.
export * from "../commands/agent.js";
export * from "../tts/tts.js";

View File

@@ -425,8 +425,8 @@ export {
resolveRuntimeEnv,
resolveRuntimeEnvWithUnavailableExit,
} from "./runtime.js";
export { detectBinary } from "../commands/onboard-helpers.js";
export { installSignalCli } from "../commands/signal-install.js";
export { detectBinary } from "../plugins/setup-binary.js";
export { installSignalCli } from "../plugins/signal-cli-install.js";
export { chunkTextForOutbound } from "./text-chunking.js";
export { resolveTextChunkLimit } from "../auto-reply/chunk.js";
export { readBooleanParam } from "./boolean-param.js";
@@ -798,21 +798,21 @@ export {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../commands/self-hosted-provider-setup.ts";
} from "../plugins/provider-self-hosted-setup.js";
export {
OLLAMA_DEFAULT_BASE_URL,
OLLAMA_DEFAULT_MODEL,
configureOllamaNonInteractive,
ensureOllamaModelPulled,
promptAndConfigureOllama,
} from "../commands/ollama-setup.ts";
} from "../plugins/provider-ollama-setup.js";
export {
VLLM_DEFAULT_BASE_URL,
VLLM_DEFAULT_CONTEXT_WINDOW,
VLLM_DEFAULT_COST,
VLLM_DEFAULT_MAX_TOKENS,
promptAndConfigureVllm,
} from "../commands/vllm-setup.ts";
} from "../plugins/provider-vllm-setup.js";
export {
buildOllamaProvider,
buildSglangProvider,

View File

@@ -12,6 +12,6 @@ export {
configureOllamaNonInteractive,
ensureOllamaModelPulled,
promptAndConfigureOllama,
} from "../commands/ollama-setup.ts";
} from "../plugins/provider-ollama-setup.js";
export { buildOllamaProvider } from "../agents/models-config.providers.discovery.js";

View File

@@ -15,21 +15,21 @@ export {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../commands/self-hosted-provider-setup.ts";
} from "../plugins/provider-self-hosted-setup.js";
export {
OLLAMA_DEFAULT_BASE_URL,
OLLAMA_DEFAULT_MODEL,
configureOllamaNonInteractive,
ensureOllamaModelPulled,
promptAndConfigureOllama,
} from "../commands/ollama-setup.ts";
} from "../plugins/provider-ollama-setup.js";
export {
VLLM_DEFAULT_BASE_URL,
VLLM_DEFAULT_CONTEXT_WINDOW,
VLLM_DEFAULT_COST,
VLLM_DEFAULT_MAX_TOKENS,
promptAndConfigureVllm,
} from "../commands/vllm-setup.ts";
} from "../plugins/provider-vllm-setup.js";
export {
buildOllamaProvider,
buildSglangProvider,

View File

@@ -15,7 +15,7 @@ export {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../commands/self-hosted-provider-setup.ts";
} from "../plugins/provider-self-hosted-setup.js";
export {
buildSglangProvider,

View File

@@ -12,8 +12,8 @@ export type { ChannelSetupWizard } from "../channels/plugins/setup-wizard.js";
export { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "../routing/session-key.js";
export { formatCliCommand } from "../cli/command-format.js";
export { detectBinary } from "../commands/onboard-helpers.js";
export { installSignalCli } from "../commands/signal-install.js";
export { detectBinary } from "../plugins/setup-binary.js";
export { installSignalCli } from "../plugins/signal-cli-install.js";
export { formatDocsLink } from "../terminal/links.js";
export { hasConfiguredSecretInput, normalizeSecretInputString } from "../config/types.secrets.js";
export { normalizeE164, pathExists } from "../utils.js";

View File

@@ -4,4 +4,4 @@ export {
detectZaiEndpoint,
type ZaiDetectedEndpoint,
type ZaiEndpointId,
} from "../commands/zai-endpoint-detect.js";
} from "../plugins/provider-zai-endpoint.js";

View File

@@ -1,7 +1,6 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { clearRuntimeAuthProfileStoreSnapshots } from "../../agents/auth-profiles/store.js";
import { applyAuthChoiceLoadedPluginProvider } from "../../commands/auth-choice.apply.plugin-provider.js";
import type { AuthChoice } from "../../commands/onboard-types.js";
import {
createAuthTestLifecycle,
createExitThrowingRuntime,
@@ -129,7 +128,7 @@ describe("provider auth-choice contract", () => {
{ authChoice: "minimax-global-oauth" as const, expectedProvider: "minimax-portal" },
{ authChoice: "modelstudio-api-key" as const, expectedProvider: "modelstudio" },
{ authChoice: "ollama" as const, expectedProvider: "ollama" },
{ authChoice: "unknown" as AuthChoice, expectedProvider: undefined },
{ authChoice: "unknown", expectedProvider: undefined },
] as const;
for (const scenario of scenarios) {

View File

@@ -0,0 +1,535 @@
import { upsertAuthProfileWithLock } from "../agents/auth-profiles/upsert-with-lock.js";
import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-defaults.js";
import {
buildOllamaModelDefinition,
enrichOllamaModelsWithContext,
fetchOllamaModels,
resolveOllamaApiBase,
type OllamaModelWithContext,
} from "../agents/ollama-models.js";
import type { OpenClawConfig } from "../config/config.js";
import type { RuntimeEnv } from "../runtime.js";
import { WizardCancelledError, type WizardPrompter } from "../wizard/prompts.js";
import { applyAgentDefaultModelPrimary } from "./provider-onboarding-config.js";
import { isRemoteEnvironment, openUrl } from "./setup-browser.js";
import type { ProviderAuthOptionBag } from "./types.js";
export { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-defaults.js";
export const OLLAMA_DEFAULT_MODEL = "glm-4.7-flash";
const OLLAMA_SUGGESTED_MODELS_LOCAL = ["glm-4.7-flash"];
const OLLAMA_SUGGESTED_MODELS_CLOUD = ["kimi-k2.5:cloud", "minimax-m2.5:cloud", "glm-5:cloud"];
type OllamaMode = "remote" | "local";
type OllamaSetupOptions = ProviderAuthOptionBag & {
customBaseUrl?: string;
customModelId?: string;
};
function normalizeOllamaModelName(value: string | undefined): string | undefined {
const trimmed = value?.trim();
if (!trimmed) {
return undefined;
}
if (trimmed.toLowerCase().startsWith("ollama/")) {
const withoutPrefix = trimmed.slice("ollama/".length).trim();
return withoutPrefix || undefined;
}
return trimmed;
}
function isOllamaCloudModel(modelName: string | undefined): boolean {
return Boolean(modelName?.trim().toLowerCase().endsWith(":cloud"));
}
function formatOllamaPullStatus(status: string): { text: string; hidePercent: boolean } {
const trimmed = status.trim();
const partStatusMatch = trimmed.match(/^([a-z-]+)\s+(?:sha256:)?[a-f0-9]{8,}$/i);
if (partStatusMatch) {
return { text: `${partStatusMatch[1]} part`, hidePercent: false };
}
if (/^verifying\b.*\bdigest\b/i.test(trimmed)) {
return { text: "verifying digest", hidePercent: true };
}
return { text: trimmed, hidePercent: false };
}
type OllamaCloudAuthResult = {
signedIn: boolean;
signinUrl?: string;
};
/** Check if the user is signed in to Ollama cloud via /api/me. */
async function checkOllamaCloudAuth(baseUrl: string): Promise<OllamaCloudAuthResult> {
try {
const apiBase = resolveOllamaApiBase(baseUrl);
const response = await fetch(`${apiBase}/api/me`, {
method: "POST",
signal: AbortSignal.timeout(5000),
});
if (response.status === 401) {
// 401 body contains { error, signin_url }
const data = (await response.json()) as { signin_url?: string };
return { signedIn: false, signinUrl: data.signin_url };
}
if (!response.ok) {
return { signedIn: false };
}
return { signedIn: true };
} catch {
// /api/me not supported or unreachable — fail closed so cloud mode
// doesn't silently skip auth; the caller handles the fallback.
return { signedIn: false };
}
}
type OllamaPullChunk = {
status?: string;
total?: number;
completed?: number;
error?: string;
};
type OllamaPullFailureKind = "http" | "no-body" | "chunk-error" | "network";
type OllamaPullResult =
| { ok: true }
| {
ok: false;
kind: OllamaPullFailureKind;
message: string;
};
async function pullOllamaModelCore(params: {
baseUrl: string;
modelName: string;
onStatus?: (status: string, percent: number | null) => void;
}): Promise<OllamaPullResult> {
const { onStatus } = params;
const baseUrl = resolveOllamaApiBase(params.baseUrl);
const modelName = normalizeOllamaModelName(params.modelName) ?? params.modelName.trim();
try {
const response = await fetch(`${baseUrl}/api/pull`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ name: modelName }),
});
if (!response.ok) {
return {
ok: false,
kind: "http",
message: `Failed to download ${modelName} (HTTP ${response.status})`,
};
}
if (!response.body) {
return {
ok: false,
kind: "no-body",
message: `Failed to download ${modelName} (no response body)`,
};
}
const reader = response.body.getReader();
const decoder = new TextDecoder();
let buffer = "";
const layers = new Map<string, { total: number; completed: number }>();
const parseLine = (line: string): OllamaPullResult => {
const trimmed = line.trim();
if (!trimmed) {
return { ok: true };
}
try {
const chunk = JSON.parse(trimmed) as OllamaPullChunk;
if (chunk.error) {
return {
ok: false,
kind: "chunk-error",
message: `Download failed: ${chunk.error}`,
};
}
if (!chunk.status) {
return { ok: true };
}
if (chunk.total && chunk.completed !== undefined) {
layers.set(chunk.status, { total: chunk.total, completed: chunk.completed });
let totalSum = 0;
let completedSum = 0;
for (const layer of layers.values()) {
totalSum += layer.total;
completedSum += layer.completed;
}
const percent = totalSum > 0 ? Math.round((completedSum / totalSum) * 100) : null;
onStatus?.(chunk.status, percent);
} else {
onStatus?.(chunk.status, null);
}
} catch {
// Ignore malformed lines from streaming output.
}
return { ok: true };
};
for (;;) {
const { done, value } = await reader.read();
if (done) {
break;
}
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split("\n");
buffer = lines.pop() ?? "";
for (const line of lines) {
const parsed = parseLine(line);
if (!parsed.ok) {
return parsed;
}
}
}
const trailing = buffer.trim();
if (trailing) {
const parsed = parseLine(trailing);
if (!parsed.ok) {
return parsed;
}
}
return { ok: true };
} catch (err) {
const reason = err instanceof Error ? err.message : String(err);
return {
ok: false,
kind: "network",
message: `Failed to download ${modelName}: ${reason}`,
};
}
}
/** Pull a model from Ollama, streaming progress updates. */
async function pullOllamaModel(
baseUrl: string,
modelName: string,
prompter: WizardPrompter,
): Promise<boolean> {
const spinner = prompter.progress(`Downloading ${modelName}...`);
const result = await pullOllamaModelCore({
baseUrl,
modelName,
onStatus: (status, percent) => {
const displayStatus = formatOllamaPullStatus(status);
if (displayStatus.hidePercent) {
spinner.update(`Downloading ${modelName} - ${displayStatus.text}`);
} else {
spinner.update(`Downloading ${modelName} - ${displayStatus.text} - ${percent ?? 0}%`);
}
},
});
if (!result.ok) {
spinner.stop(result.message);
return false;
}
spinner.stop(`Downloaded ${modelName}`);
return true;
}
async function pullOllamaModelNonInteractive(
baseUrl: string,
modelName: string,
runtime: RuntimeEnv,
): Promise<boolean> {
runtime.log(`Downloading ${modelName}...`);
const result = await pullOllamaModelCore({ baseUrl, modelName });
if (!result.ok) {
runtime.error(result.message);
return false;
}
runtime.log(`Downloaded ${modelName}`);
return true;
}
function buildOllamaModelsConfig(
modelNames: string[],
discoveredModelsByName?: Map<string, OllamaModelWithContext>,
) {
return modelNames.map((name) =>
buildOllamaModelDefinition(name, discoveredModelsByName?.get(name)?.contextWindow),
);
}
function applyOllamaProviderConfig(
cfg: OpenClawConfig,
baseUrl: string,
modelNames: string[],
discoveredModelsByName?: Map<string, OllamaModelWithContext>,
): OpenClawConfig {
return {
...cfg,
models: {
...cfg.models,
mode: cfg.models?.mode ?? "merge",
providers: {
...cfg.models?.providers,
ollama: {
baseUrl,
api: "ollama",
apiKey: "OLLAMA_API_KEY", // pragma: allowlist secret
models: buildOllamaModelsConfig(modelNames, discoveredModelsByName),
},
},
},
};
}
async function storeOllamaCredential(agentDir?: string): Promise<void> {
await upsertAuthProfileWithLock({
profileId: "ollama:default",
credential: { type: "api_key", provider: "ollama", key: "ollama-local" },
agentDir,
});
}
/**
* Interactive: prompt for base URL, discover models, configure provider.
* Model selection is handled by the standard model picker downstream.
*/
export async function promptAndConfigureOllama(params: {
cfg: OpenClawConfig;
prompter: WizardPrompter;
}): Promise<{ config: OpenClawConfig; defaultModelId: string }> {
const { prompter } = params;
// 1. Prompt base URL
const baseUrlRaw = await prompter.text({
message: "Ollama base URL",
initialValue: OLLAMA_DEFAULT_BASE_URL,
placeholder: OLLAMA_DEFAULT_BASE_URL,
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const configuredBaseUrl = String(baseUrlRaw ?? "")
.trim()
.replace(/\/+$/, "");
const baseUrl = resolveOllamaApiBase(configuredBaseUrl);
// 2. Check reachability
const { reachable, models } = await fetchOllamaModels(baseUrl);
if (!reachable) {
await prompter.note(
[
`Ollama could not be reached at ${baseUrl}.`,
"Download it at https://ollama.com/download",
"",
"Start Ollama and re-run setup.",
].join("\n"),
"Ollama",
);
throw new WizardCancelledError("Ollama not reachable");
}
const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50));
const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
const modelNames = models.map((m) => m.name);
// 3. Mode selection
const mode = (await prompter.select({
message: "Ollama mode",
options: [
{ value: "remote", label: "Cloud + Local", hint: "Ollama cloud models + local models" },
{ value: "local", label: "Local", hint: "Local models only" },
],
})) as OllamaMode;
// 4. Cloud auth — check /api/me upfront for remote (cloud+local) mode
let cloudAuthVerified = false;
if (mode === "remote") {
const authResult = await checkOllamaCloudAuth(baseUrl);
if (!authResult.signedIn) {
if (authResult.signinUrl) {
if (!isRemoteEnvironment()) {
await openUrl(authResult.signinUrl);
}
await prompter.note(
["Sign in to Ollama Cloud:", authResult.signinUrl].join("\n"),
"Ollama Cloud",
);
const confirmed = await prompter.confirm({
message: "Have you signed in?",
});
if (!confirmed) {
throw new WizardCancelledError("Ollama cloud sign-in cancelled");
}
// Re-check after user claims sign-in
const recheck = await checkOllamaCloudAuth(baseUrl);
if (!recheck.signedIn) {
throw new WizardCancelledError("Ollama cloud sign-in required");
}
cloudAuthVerified = true;
} else {
// No signin URL available (older server, unreachable /api/me, or custom gateway).
await prompter.note(
[
"Could not verify Ollama Cloud authentication.",
"Cloud models may not work until you sign in at https://ollama.com.",
].join("\n"),
"Ollama Cloud",
);
const continueAnyway = await prompter.confirm({
message: "Continue without cloud auth?",
});
if (!continueAnyway) {
throw new WizardCancelledError("Ollama cloud auth could not be verified");
}
// Cloud auth unverified — fall back to local defaults so the model
// picker doesn't steer toward cloud models that may fail.
}
} else {
cloudAuthVerified = true;
}
}
// 5. Model ordering — suggested models first.
// Use cloud defaults only when auth was actually verified; otherwise fall
// back to local defaults so the user isn't steered toward cloud models
// that may fail at runtime.
const suggestedModels =
mode === "local" || !cloudAuthVerified
? OLLAMA_SUGGESTED_MODELS_LOCAL
: OLLAMA_SUGGESTED_MODELS_CLOUD;
const orderedModelNames = [
...suggestedModels,
...modelNames.filter((name) => !suggestedModels.includes(name)),
];
const defaultModelId = suggestedModels[0] ?? OLLAMA_DEFAULT_MODEL;
const config = applyOllamaProviderConfig(
params.cfg,
baseUrl,
orderedModelNames,
discoveredModelsByName,
);
return { config, defaultModelId };
}
/** Non-interactive: auto-discover models and configure provider. */
export async function configureOllamaNonInteractive(params: {
nextConfig: OpenClawConfig;
opts: OllamaSetupOptions;
runtime: RuntimeEnv;
}): Promise<OpenClawConfig> {
const { opts, runtime } = params;
const configuredBaseUrl = (opts.customBaseUrl?.trim() || OLLAMA_DEFAULT_BASE_URL).replace(
/\/+$/,
"",
);
const baseUrl = resolveOllamaApiBase(configuredBaseUrl);
const { reachable, models } = await fetchOllamaModels(baseUrl);
const explicitModel = normalizeOllamaModelName(opts.customModelId);
if (!reachable) {
runtime.error(
[
`Ollama could not be reached at ${baseUrl}.`,
"Download it at https://ollama.com/download",
].join("\n"),
);
runtime.exit(1);
return params.nextConfig;
}
await storeOllamaCredential();
const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50));
const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
const modelNames = models.map((m) => m.name);
// Apply local suggested model ordering.
const suggestedModels = OLLAMA_SUGGESTED_MODELS_LOCAL;
const orderedModelNames = [
...suggestedModels,
...modelNames.filter((name) => !suggestedModels.includes(name)),
];
const requestedDefaultModelId = explicitModel ?? suggestedModels[0];
let pulledRequestedModel = false;
const availableModelNames = new Set(modelNames);
const requestedCloudModel = isOllamaCloudModel(requestedDefaultModelId);
if (requestedCloudModel) {
availableModelNames.add(requestedDefaultModelId);
}
// Pull if model not in discovered list and Ollama is reachable
if (!requestedCloudModel && !modelNames.includes(requestedDefaultModelId)) {
pulledRequestedModel = await pullOllamaModelNonInteractive(
baseUrl,
requestedDefaultModelId,
runtime,
);
if (pulledRequestedModel) {
availableModelNames.add(requestedDefaultModelId);
}
}
let allModelNames = orderedModelNames;
let defaultModelId = requestedDefaultModelId;
if (
(pulledRequestedModel || requestedCloudModel) &&
!allModelNames.includes(requestedDefaultModelId)
) {
allModelNames = [...allModelNames, requestedDefaultModelId];
}
if (!availableModelNames.has(requestedDefaultModelId)) {
if (availableModelNames.size > 0) {
const firstAvailableModel =
allModelNames.find((name) => availableModelNames.has(name)) ??
Array.from(availableModelNames)[0];
defaultModelId = firstAvailableModel;
runtime.log(
`Ollama model ${requestedDefaultModelId} was not available; using ${defaultModelId} instead.`,
);
} else {
runtime.error(
[
`No Ollama models are available at ${baseUrl}.`,
"Pull a model first, then re-run setup.",
].join("\n"),
);
runtime.exit(1);
return params.nextConfig;
}
}
const config = applyOllamaProviderConfig(
params.nextConfig,
baseUrl,
allModelNames,
discoveredModelsByName,
);
const modelRef = `ollama/${defaultModelId}`;
runtime.log(`Default Ollama model: ${defaultModelId}`);
return applyAgentDefaultModelPrimary(config, modelRef);
}
/** Pull the configured default Ollama model if it isn't already available locally. */
export async function ensureOllamaModelPulled(params: {
config: OpenClawConfig;
prompter: WizardPrompter;
}): Promise<void> {
const modelCfg = params.config.agents?.defaults?.model;
const modelId = typeof modelCfg === "string" ? modelCfg : modelCfg?.primary;
if (!modelId?.startsWith("ollama/")) {
return;
}
const baseUrl = params.config.models?.providers?.ollama?.baseUrl ?? OLLAMA_DEFAULT_BASE_URL;
const modelName = modelId.slice("ollama/".length);
if (isOllamaCloudModel(modelName)) {
return;
}
const { models } = await fetchOllamaModels(baseUrl);
if (models.some((m) => m.name === modelName)) {
return;
}
const pulled = await pullOllamaModel(baseUrl, modelName, params.prompter);
if (!pulled) {
throw new WizardCancelledError("Failed to download selected Ollama model");
}
}
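// Usage sketch (illustrative only, not part of this module's flow): after the wizard
// applies an "ollama/..." default, make sure the weights exist before first use.
// `config` and `prompter` are assumed to come from the surrounding onboarding flow.
//
//   const withDefault = applyAgentDefaultModelPrimary(config, "ollama/glm-4.7-flash");
//   await ensureOllamaModelPulled({ config: withDefault, prompter });
//   // No-op for ":cloud" models or when the model is already present locally.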

View File

@ -0,0 +1,304 @@
import type { ApiKeyCredential, AuthProfileCredential } from "../agents/auth-profiles/types.js";
import { upsertAuthProfileWithLock } from "../agents/auth-profiles/upsert-with-lock.js";
import {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../agents/self-hosted-provider-defaults.js";
import type { OpenClawConfig } from "../config/config.js";
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
import type { WizardPrompter } from "../wizard/prompts.js";
import { applyAuthProfileConfig } from "./provider-auth-helpers.js";
import type {
ProviderDiscoveryContext,
ProviderAuthResult,
ProviderAuthMethodNonInteractiveContext,
ProviderNonInteractiveApiKeyResult,
} from "./types.js";
export {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../agents/self-hosted-provider-defaults.js";
export function applyProviderDefaultModel(cfg: OpenClawConfig, modelRef: string): OpenClawConfig {
const existingModel = cfg.agents?.defaults?.model;
const fallbacks =
existingModel && typeof existingModel === "object" && "fallbacks" in existingModel
? (existingModel as { fallbacks?: string[] }).fallbacks
: undefined;
return {
...cfg,
agents: {
...cfg.agents,
defaults: {
...cfg.agents?.defaults,
model: {
...(fallbacks ? { fallbacks } : undefined),
primary: modelRef,
},
},
},
};
}
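// Minimal sketch (assumed config shape, for illustration only): existing fallbacks are
// carried over while the primary model reference is replaced.
//
//   const cfg: OpenClawConfig = {
//     agents: { defaults: { model: { primary: "ollama/glm-4.7-flash", fallbacks: ["zai/glm-5"] } } },
//   };
//   const next = applyProviderDefaultModel(cfg, "vllm/llama-3.1-8b-instruct");
//   // next.agents.defaults.model => { fallbacks: ["zai/glm-5"], primary: "vllm/llama-3.1-8b-instruct" }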
function buildOpenAICompatibleSelfHostedProviderConfig(params: {
cfg: OpenClawConfig;
providerId: string;
baseUrl: string;
providerApiKey: string;
modelId: string;
input?: Array<"text" | "image">;
reasoning?: boolean;
contextWindow?: number;
maxTokens?: number;
}): { config: OpenClawConfig; modelId: string; modelRef: string; profileId: string } {
const modelRef = `${params.providerId}/${params.modelId}`;
const profileId = `${params.providerId}:default`;
return {
config: {
...params.cfg,
models: {
...params.cfg.models,
mode: params.cfg.models?.mode ?? "merge",
providers: {
...params.cfg.models?.providers,
[params.providerId]: {
baseUrl: params.baseUrl,
api: "openai-completions",
apiKey: params.providerApiKey,
models: [
{
id: params.modelId,
name: params.modelId,
reasoning: params.reasoning ?? false,
input: params.input ?? ["text"],
cost: SELF_HOSTED_DEFAULT_COST,
contextWindow: params.contextWindow ?? SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
maxTokens: params.maxTokens ?? SELF_HOSTED_DEFAULT_MAX_TOKENS,
},
],
},
},
},
},
modelId: params.modelId,
modelRef,
profileId,
};
}
type OpenAICompatibleSelfHostedProviderSetupParams = {
cfg: OpenClawConfig;
prompter: WizardPrompter;
providerId: string;
providerLabel: string;
defaultBaseUrl: string;
defaultApiKeyEnvVar: string;
modelPlaceholder: string;
input?: Array<"text" | "image">;
reasoning?: boolean;
contextWindow?: number;
maxTokens?: number;
};
type OpenAICompatibleSelfHostedProviderPromptResult = {
config: OpenClawConfig;
credential: AuthProfileCredential;
modelId: string;
modelRef: string;
profileId: string;
};
function buildSelfHostedProviderAuthResult(
result: OpenAICompatibleSelfHostedProviderPromptResult,
): ProviderAuthResult {
return {
profiles: [
{
profileId: result.profileId,
credential: result.credential,
},
],
configPatch: result.config,
defaultModel: result.modelRef,
};
}
export async function promptAndConfigureOpenAICompatibleSelfHostedProvider(
params: OpenAICompatibleSelfHostedProviderSetupParams,
): Promise<OpenAICompatibleSelfHostedProviderPromptResult> {
const baseUrlRaw = await params.prompter.text({
message: `${params.providerLabel} base URL`,
initialValue: params.defaultBaseUrl,
placeholder: params.defaultBaseUrl,
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const apiKeyRaw = await params.prompter.text({
message: `${params.providerLabel} API key`,
placeholder: "sk-... (or any non-empty string)",
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const modelIdRaw = await params.prompter.text({
message: `${params.providerLabel} model`,
placeholder: params.modelPlaceholder,
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const baseUrl = String(baseUrlRaw ?? "")
.trim()
.replace(/\/+$/, "");
const apiKey = String(apiKeyRaw ?? "").trim();
const modelId = String(modelIdRaw ?? "").trim();
const credential: AuthProfileCredential = {
type: "api_key",
provider: params.providerId,
key: apiKey,
};
const configured = buildOpenAICompatibleSelfHostedProviderConfig({
cfg: params.cfg,
providerId: params.providerId,
baseUrl,
providerApiKey: params.defaultApiKeyEnvVar,
modelId,
input: params.input,
reasoning: params.reasoning,
contextWindow: params.contextWindow,
maxTokens: params.maxTokens,
});
return {
config: configured.config,
credential,
modelId: configured.modelId,
modelRef: configured.modelRef,
profileId: configured.profileId,
};
}
export async function promptAndConfigureOpenAICompatibleSelfHostedProviderAuth(
params: OpenAICompatibleSelfHostedProviderSetupParams,
): Promise<ProviderAuthResult> {
const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider(params);
return buildSelfHostedProviderAuthResult(result);
}
export async function discoverOpenAICompatibleSelfHostedProvider<
T extends Record<string, unknown>,
>(params: {
ctx: ProviderDiscoveryContext;
providerId: string;
buildProvider: (params: { apiKey?: string }) => Promise<T>;
}): Promise<{ provider: T & { apiKey: string } } | null> {
if (params.ctx.config.models?.providers?.[params.providerId]) {
return null;
}
const { apiKey, discoveryApiKey } = params.ctx.resolveProviderApiKey(params.providerId);
if (!apiKey) {
return null;
}
return {
provider: {
...(await params.buildProvider({ apiKey: discoveryApiKey })),
apiKey,
},
};
}
function buildMissingNonInteractiveModelIdMessage(params: {
authChoice: string;
providerLabel: string;
modelPlaceholder: string;
}): string {
return [
`Missing --custom-model-id for --auth-choice ${params.authChoice}.`,
`Pass the ${params.providerLabel} model id to use, for example ${params.modelPlaceholder}.`,
].join("\n");
}
function buildSelfHostedProviderCredential(params: {
ctx: ProviderAuthMethodNonInteractiveContext;
providerId: string;
resolved: ProviderNonInteractiveApiKeyResult;
}): ApiKeyCredential | null {
return params.ctx.toApiKeyCredential({
provider: params.providerId,
resolved: params.resolved,
});
}
export async function configureOpenAICompatibleSelfHostedProviderNonInteractive(params: {
ctx: ProviderAuthMethodNonInteractiveContext;
providerId: string;
providerLabel: string;
defaultBaseUrl: string;
defaultApiKeyEnvVar: string;
modelPlaceholder: string;
input?: Array<"text" | "image">;
reasoning?: boolean;
contextWindow?: number;
maxTokens?: number;
}): Promise<OpenClawConfig | null> {
const baseUrl = (
normalizeOptionalSecretInput(params.ctx.opts.customBaseUrl) ?? params.defaultBaseUrl
).replace(/\/+$/, "");
const modelId = normalizeOptionalSecretInput(params.ctx.opts.customModelId);
if (!modelId) {
params.ctx.runtime.error(
buildMissingNonInteractiveModelIdMessage({
authChoice: params.ctx.authChoice,
providerLabel: params.providerLabel,
modelPlaceholder: params.modelPlaceholder,
}),
);
params.ctx.runtime.exit(1);
return null;
}
const resolved = await params.ctx.resolveApiKey({
provider: params.providerId,
flagValue: normalizeOptionalSecretInput(params.ctx.opts.customApiKey),
flagName: "--custom-api-key",
envVar: params.defaultApiKeyEnvVar,
envVarName: params.defaultApiKeyEnvVar,
});
if (!resolved) {
return null;
}
const credential = buildSelfHostedProviderCredential({
ctx: params.ctx,
providerId: params.providerId,
resolved,
});
if (!credential) {
return null;
}
const configured = buildOpenAICompatibleSelfHostedProviderConfig({
cfg: params.ctx.config,
providerId: params.providerId,
baseUrl,
providerApiKey: params.defaultApiKeyEnvVar,
modelId,
input: params.input,
reasoning: params.reasoning,
contextWindow: params.contextWindow,
maxTokens: params.maxTokens,
});
await upsertAuthProfileWithLock({
profileId: configured.profileId,
credential,
agentDir: params.ctx.agentDir,
});
const withProfile = applyAuthProfileConfig(configured.config, {
profileId: configured.profileId,
provider: params.providerId,
mode: "api_key",
});
params.ctx.runtime.log(`Default ${params.providerLabel} model: ${modelId}`);
return applyProviderDefaultModel(withProfile, configured.modelRef);
}

View File

@ -0,0 +1,42 @@
import {
VLLM_DEFAULT_API_KEY_ENV_VAR,
VLLM_DEFAULT_BASE_URL,
VLLM_MODEL_PLACEHOLDER,
VLLM_PROVIDER_LABEL,
} from "../agents/vllm-defaults.js";
import type { OpenClawConfig } from "../config/config.js";
import type { WizardPrompter } from "../wizard/prompts.js";
import {
applyProviderDefaultModel,
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
promptAndConfigureOpenAICompatibleSelfHostedProvider,
} from "./provider-self-hosted-setup.js";
export { VLLM_DEFAULT_BASE_URL } from "../agents/vllm-defaults.js";
export const VLLM_DEFAULT_CONTEXT_WINDOW = SELF_HOSTED_DEFAULT_CONTEXT_WINDOW;
export const VLLM_DEFAULT_MAX_TOKENS = SELF_HOSTED_DEFAULT_MAX_TOKENS;
export const VLLM_DEFAULT_COST = SELF_HOSTED_DEFAULT_COST;
export async function promptAndConfigureVllm(params: {
cfg: OpenClawConfig;
prompter: WizardPrompter;
}): Promise<{ config: OpenClawConfig; modelId: string; modelRef: string }> {
const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider({
cfg: params.cfg,
prompter: params.prompter,
providerId: "vllm",
providerLabel: VLLM_PROVIDER_LABEL,
defaultBaseUrl: VLLM_DEFAULT_BASE_URL,
defaultApiKeyEnvVar: VLLM_DEFAULT_API_KEY_ENV_VAR,
modelPlaceholder: VLLM_MODEL_PLACEHOLDER,
});
return {
config: result.config,
modelId: result.modelId,
modelRef: result.modelRef,
};
}
export { applyProviderDefaultModel as applyVllmDefaultModel };
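// Usage sketch (illustrative only; `cfg` and `prompter` come from the caller's wizard context):
//
//   const { config, modelRef } = await promptAndConfigureVllm({ cfg, prompter });
//   const next = applyVllmDefaultModel(config, modelRef);
//   // `next` now points the default agent model at "vllm/<chosen-model-id>".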

View File

@ -0,0 +1,179 @@
import {
ZAI_CN_BASE_URL,
ZAI_CODING_CN_BASE_URL,
ZAI_CODING_GLOBAL_BASE_URL,
ZAI_GLOBAL_BASE_URL,
} from "../../extensions/zai/model-definitions.js";
import { fetchWithTimeout } from "../utils/fetch-timeout.js";
export type ZaiEndpointId = "global" | "cn" | "coding-global" | "coding-cn";
export type ZaiDetectedEndpoint = {
endpoint: ZaiEndpointId;
/** Provider baseUrl to store in config. */
baseUrl: string;
/** Recommended default model id for that endpoint. */
modelId: string;
/** Human-readable note explaining the choice. */
note: string;
};
type ProbeResult =
| { ok: true }
| {
ok: false;
status?: number;
errorCode?: string;
errorMessage?: string;
};
async function probeZaiChatCompletions(params: {
baseUrl: string;
apiKey: string;
modelId: string;
timeoutMs: number;
fetchFn?: typeof fetch;
}): Promise<ProbeResult> {
try {
const res = await fetchWithTimeout(
`${params.baseUrl}/chat/completions`,
{
method: "POST",
headers: {
authorization: `Bearer ${params.apiKey}`,
"content-type": "application/json",
},
body: JSON.stringify({
model: params.modelId,
stream: false,
max_tokens: 1,
messages: [{ role: "user", content: "ping" }],
}),
},
params.timeoutMs,
params.fetchFn,
);
if (res.ok) {
return { ok: true };
}
let errorCode: string | undefined;
let errorMessage: string | undefined;
try {
const json = (await res.json()) as {
error?: { code?: unknown; message?: unknown };
msg?: unknown;
message?: unknown;
};
const code = json?.error?.code;
const msg = json?.error?.message ?? json?.msg ?? json?.message;
if (typeof code === "string") {
errorCode = code;
} else if (typeof code === "number") {
errorCode = String(code);
}
if (typeof msg === "string") {
errorMessage = msg;
}
} catch {
// ignore
}
return { ok: false, status: res.status, errorCode, errorMessage };
} catch {
return { ok: false };
}
}
export async function detectZaiEndpoint(params: {
apiKey: string;
endpoint?: ZaiEndpointId;
timeoutMs?: number;
fetchFn?: typeof fetch;
}): Promise<ZaiDetectedEndpoint | null> {
// Never auto-probe in vitest; it would create flaky network behavior.
if (process.env.VITEST && !params.fetchFn) {
return null;
}
const timeoutMs = params.timeoutMs ?? 5_000;
const probeCandidates = (() => {
const general = [
{
endpoint: "global" as const,
baseUrl: ZAI_GLOBAL_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on global endpoint.",
},
{
endpoint: "cn" as const,
baseUrl: ZAI_CN_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on cn endpoint.",
},
];
const codingGlm5 = [
{
endpoint: "coding-global" as const,
baseUrl: ZAI_CODING_GLOBAL_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on coding-global endpoint.",
},
{
endpoint: "coding-cn" as const,
baseUrl: ZAI_CODING_CN_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on coding-cn endpoint.",
},
];
const codingFallback = [
{
endpoint: "coding-global" as const,
baseUrl: ZAI_CODING_GLOBAL_BASE_URL,
modelId: "glm-4.7",
note: "Coding Plan endpoint verified, but this key/plan does not expose GLM-5 there. Defaulting to GLM-4.7.",
},
{
endpoint: "coding-cn" as const,
baseUrl: ZAI_CODING_CN_BASE_URL,
modelId: "glm-4.7",
note: "Coding Plan CN endpoint verified, but this key/plan does not expose GLM-5 there. Defaulting to GLM-4.7.",
},
];
switch (params.endpoint) {
case "global":
return general.filter((candidate) => candidate.endpoint === "global");
case "cn":
return general.filter((candidate) => candidate.endpoint === "cn");
case "coding-global":
return [
...codingGlm5.filter((candidate) => candidate.endpoint === "coding-global"),
...codingFallback.filter((candidate) => candidate.endpoint === "coding-global"),
];
case "coding-cn":
return [
...codingGlm5.filter((candidate) => candidate.endpoint === "coding-cn"),
...codingFallback.filter((candidate) => candidate.endpoint === "coding-cn"),
];
default:
return [...general, ...codingGlm5, ...codingFallback];
}
})();
for (const candidate of probeCandidates) {
const result = await probeZaiChatCompletions({
baseUrl: candidate.baseUrl,
apiKey: params.apiKey,
modelId: candidate.modelId,
timeoutMs,
fetchFn: params.fetchFn,
});
if (result.ok) {
return candidate;
}
}
return null;
}
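// Usage sketch (illustrative only): inject a stub fetch so detection stays deterministic
// in tests; the stub and key below are assumptions, not real endpoints or credentials.
//
//   const fakeFetch: typeof fetch = async () => new Response("{}", { status: 200 });
//   const detected = await detectZaiEndpoint({ apiKey: "test-key", fetchFn: fakeFetch });
//   // The first candidate that answers 200 wins, so this resolves to the "global" endpoint.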

View File

@ -0,0 +1,36 @@
import fs from "node:fs/promises";
import path from "node:path";
import { isSafeExecutableValue } from "../infra/exec-safety.js";
import { runCommandWithTimeout } from "../process/exec.js";
import { resolveUserPath } from "../utils.js";
export async function detectBinary(name: string): Promise<boolean> {
if (!name?.trim()) {
return false;
}
if (!isSafeExecutableValue(name)) {
return false;
}
const resolved = name.startsWith("~") ? resolveUserPath(name) : name;
if (
path.isAbsolute(resolved) ||
resolved.startsWith(".") ||
resolved.includes("/") ||
resolved.includes("\\")
) {
try {
await fs.access(resolved);
return true;
} catch {
return false;
}
}
const command = process.platform === "win32" ? ["where", name] : ["/usr/bin/env", "which", name];
try {
const result = await runCommandWithTimeout(command, { timeoutMs: 2000 });
return result.code === 0 && result.stdout.trim().length > 0;
} catch {
return false;
}
}
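// Usage sketch (illustrative only): bare names are resolved via `which`/`where`, while
// explicit or ~-prefixed paths are checked directly with fs.access.
//
//   await detectBinary("ffmpeg");        // true when ffmpeg resolves on PATH
//   await detectBinary("~/bin/my-tool"); // true when that file exists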

View File

@ -0,0 +1,112 @@
import { isWSL, isWSLEnv } from "../infra/wsl.js";
import { runCommandWithTimeout } from "../process/exec.js";
import { detectBinary } from "./setup-binary.js";
function shouldSkipBrowserOpenInTests(): boolean {
if (process.env.VITEST) {
return true;
}
return process.env.NODE_ENV === "test";
}
type BrowserOpenCommand = {
argv: string[] | null;
command?: string;
quoteUrl?: boolean;
};
async function resolveBrowserOpenCommand(): Promise<BrowserOpenCommand> {
const platform = process.platform;
const hasDisplay = Boolean(process.env.DISPLAY || process.env.WAYLAND_DISPLAY);
const isSsh =
Boolean(process.env.SSH_CLIENT) ||
Boolean(process.env.SSH_TTY) ||
Boolean(process.env.SSH_CONNECTION);
if (isSsh && !hasDisplay && platform !== "win32") {
return { argv: null };
}
if (platform === "win32") {
return {
argv: ["cmd", "/c", "start", ""],
command: "cmd",
quoteUrl: true,
};
}
if (platform === "darwin") {
const hasOpen = await detectBinary("open");
return hasOpen ? { argv: ["open"], command: "open" } : { argv: null };
}
if (platform === "linux") {
const wsl = await isWSL();
if (!hasDisplay && !wsl) {
return { argv: null };
}
if (wsl) {
const hasWslview = await detectBinary("wslview");
if (hasWslview) {
return { argv: ["wslview"], command: "wslview" };
}
if (!hasDisplay) {
return { argv: null };
}
}
const hasXdgOpen = await detectBinary("xdg-open");
return hasXdgOpen ? { argv: ["xdg-open"], command: "xdg-open" } : { argv: null };
}
return { argv: null };
}
export function isRemoteEnvironment(): boolean {
if (process.env.SSH_CLIENT || process.env.SSH_TTY || process.env.SSH_CONNECTION) {
return true;
}
if (process.env.REMOTE_CONTAINERS || process.env.CODESPACES) {
return true;
}
if (
process.platform === "linux" &&
!process.env.DISPLAY &&
!process.env.WAYLAND_DISPLAY &&
!isWSLEnv()
) {
return true;
}
return false;
}
export async function openUrl(url: string): Promise<boolean> {
if (shouldSkipBrowserOpenInTests()) {
return false;
}
const resolved = await resolveBrowserOpenCommand();
if (!resolved.argv) {
return false;
}
const quoteUrl = resolved.quoteUrl === true;
const command = [...resolved.argv];
if (quoteUrl) {
if (command.at(-1) === "") {
command[command.length - 1] = '""';
}
command.push(`"${url}"`);
} else {
command.push(url);
}
try {
await runCommandWithTimeout(command, {
timeoutMs: 5_000,
windowsVerbatimArguments: quoteUrl,
});
return true;
} catch {
return false;
}
}
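// Usage sketch (illustrative only): one way callers can combine these helpers is to try
// a browser launch and print the URL instead on headless or remote hosts.
//
//   const url = "https://example.com/device-auth";
//   if (isRemoteEnvironment() || !(await openUrl(url))) {
//     console.log(`Open this URL in your browser: ${url}`);
//   }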

View File

@ -0,0 +1,302 @@
import { createWriteStream } from "node:fs";
import fs from "node:fs/promises";
import { request } from "node:https";
import os from "node:os";
import path from "node:path";
import { pipeline } from "node:stream/promises";
import { extractArchive } from "../infra/archive.js";
import { resolveBrewExecutable } from "../infra/brew.js";
import { runCommandWithTimeout } from "../process/exec.js";
import type { RuntimeEnv } from "../runtime.js";
import { CONFIG_DIR } from "../utils.js";
export type ReleaseAsset = {
name?: string;
browser_download_url?: string;
};
export type NamedAsset = {
name: string;
browser_download_url: string;
};
type ReleaseResponse = {
tag_name?: string;
assets?: ReleaseAsset[];
};
export type SignalInstallResult = {
ok: boolean;
cliPath?: string;
version?: string;
error?: string;
};
/** @internal Exported for testing. */
export async function extractSignalCliArchive(
archivePath: string,
installRoot: string,
timeoutMs: number,
): Promise<void> {
await extractArchive({ archivePath, destDir: installRoot, timeoutMs });
}
/** @internal Exported for testing. */
export function looksLikeArchive(name: string): boolean {
return name.endsWith(".tar.gz") || name.endsWith(".tgz") || name.endsWith(".zip");
}
/**
* Pick a native release asset from the official GitHub releases.
*
* The official signal-cli releases only publish native (GraalVM) binaries for
* x86-64 Linux. On architectures where no native asset is available this
* returns `undefined` so the caller can fall back to a different install
 * strategy (e.g. Homebrew).
 *
 * @internal Exported for testing.
 */
export function pickAsset(
assets: ReleaseAsset[],
platform: NodeJS.Platform,
arch: string,
): NamedAsset | undefined {
const withName = assets.filter((asset): asset is NamedAsset =>
Boolean(asset.name && asset.browser_download_url),
);
// Archives only, excluding signature files (.asc)
const archives = withName.filter((a) => looksLikeArchive(a.name.toLowerCase()));
const byName = (pattern: RegExp) =>
archives.find((asset) => pattern.test(asset.name.toLowerCase()));
if (platform === "linux") {
// The official "Linux-native" asset is an x86-64 GraalVM binary.
// On non-x64 architectures it will fail with "Exec format error",
// so only select it when the host architecture matches.
if (arch === "x64") {
return byName(/linux-native/) || byName(/linux/) || archives[0];
}
// No native release for this arch — caller should fall back.
return undefined;
}
if (platform === "darwin") {
return byName(/macos|osx|darwin/) || archives[0];
}
if (platform === "win32") {
return byName(/windows|win/) || archives[0];
}
return archives[0];
}
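// Worked example (hypothetical asset names, for illustration only):
//
//   const assets: ReleaseAsset[] = [
//     { name: "signal-cli-0.13.24.tar.gz", browser_download_url: "https://example.com/generic.tar.gz" },
//     { name: "signal-cli-0.13.24-Linux-native.tar.gz", browser_download_url: "https://example.com/native.tar.gz" },
//   ];
//   pickAsset(assets, "linux", "x64");    // -> the Linux-native archive
//   pickAsset(assets, "linux", "arm64");  // -> undefined; caller falls back to Homebrew
//   pickAsset(assets, "darwin", "arm64"); // -> the generic archive (archives[0] fallback)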
async function downloadToFile(url: string, dest: string, maxRedirects = 5): Promise<void> {
await new Promise<void>((resolve, reject) => {
const req = request(url, (res) => {
if (res.statusCode && res.statusCode >= 300 && res.statusCode < 400) {
const location = res.headers.location;
if (!location || maxRedirects <= 0) {
reject(new Error("Redirect loop or missing Location header"));
return;
}
const redirectUrl = new URL(location, url).href;
resolve(downloadToFile(redirectUrl, dest, maxRedirects - 1));
return;
}
if (!res.statusCode || res.statusCode >= 400) {
reject(new Error(`HTTP ${res.statusCode ?? "?"} downloading file`));
return;
}
const out = createWriteStream(dest);
pipeline(res, out).then(resolve).catch(reject);
});
req.on("error", reject);
req.end();
});
}
async function findSignalCliBinary(root: string): Promise<string | null> {
const candidates: string[] = [];
const enqueue = async (dir: string, depth: number) => {
if (depth > 3) {
return;
}
const entries = await fs.readdir(dir, { withFileTypes: true }).catch(() => []);
for (const entry of entries) {
const full = path.join(dir, entry.name);
if (entry.isDirectory()) {
await enqueue(full, depth + 1);
} else if (entry.isFile() && entry.name === "signal-cli") {
candidates.push(full);
}
}
};
await enqueue(root, 0);
return candidates[0] ?? null;
}
// ---------------------------------------------------------------------------
// Brew-based install (used on architectures without an official native build)
// ---------------------------------------------------------------------------
async function resolveBrewSignalCliPath(brewExe: string): Promise<string | null> {
try {
const result = await runCommandWithTimeout([brewExe, "--prefix", "signal-cli"], {
timeoutMs: 10_000,
});
if (result.code === 0 && result.stdout.trim()) {
const prefix = result.stdout.trim();
// Homebrew installs the wrapper script at <prefix>/bin/signal-cli
const candidate = path.join(prefix, "bin", "signal-cli");
try {
await fs.access(candidate);
return candidate;
} catch {
// Fall back to searching the prefix
return findSignalCliBinary(prefix);
}
}
} catch {
// ignore
}
return null;
}
async function installSignalCliViaBrew(runtime: RuntimeEnv): Promise<SignalInstallResult> {
const brewExe = resolveBrewExecutable();
if (!brewExe) {
return {
ok: false,
error:
`No native signal-cli build is available for ${process.arch}. ` +
"Install Homebrew (https://brew.sh) and try again, or install signal-cli manually.",
};
}
runtime.log(`Installing signal-cli via Homebrew (${brewExe})…`);
const result = await runCommandWithTimeout([brewExe, "install", "signal-cli"], {
timeoutMs: 15 * 60_000, // brew builds from source; can take a while
});
if (result.code !== 0) {
return {
ok: false,
error: `brew install signal-cli failed (exit ${result.code}): ${result.stderr.trim().slice(0, 200)}`,
};
}
const cliPath = await resolveBrewSignalCliPath(brewExe);
if (!cliPath) {
return {
ok: false,
error: "brew install succeeded but signal-cli binary was not found.",
};
}
// Extract version from the installed binary.
let version: string | undefined;
try {
const vResult = await runCommandWithTimeout([cliPath, "--version"], {
timeoutMs: 10_000,
});
// Output is typically "signal-cli 0.13.24"
version = vResult.stdout.trim().replace(/^signal-cli\s+/, "") || undefined;
} catch {
// non-critical; leave version undefined
}
return { ok: true, cliPath, version };
}
// ---------------------------------------------------------------------------
// Direct download install (used when an official native asset is available)
// ---------------------------------------------------------------------------
async function installSignalCliFromRelease(runtime: RuntimeEnv): Promise<SignalInstallResult> {
const apiUrl = "https://api.github.com/repos/AsamK/signal-cli/releases/latest";
const response = await fetch(apiUrl, {
headers: {
"User-Agent": "openclaw",
Accept: "application/vnd.github+json",
},
});
if (!response.ok) {
return {
ok: false,
error: `Failed to fetch release info (${response.status})`,
};
}
const payload = (await response.json()) as ReleaseResponse;
const version = payload.tag_name?.replace(/^v/, "") ?? "unknown";
const assets = payload.assets ?? [];
const asset = pickAsset(assets, process.platform, process.arch);
if (!asset) {
return {
ok: false,
error: "No compatible release asset found for this platform.",
};
}
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-signal-"));
const archivePath = path.join(tmpDir, asset.name);
runtime.log(`Downloading signal-cli ${version} (${asset.name})…`);
await downloadToFile(asset.browser_download_url, archivePath);
const installRoot = path.join(CONFIG_DIR, "tools", "signal-cli", version);
await fs.mkdir(installRoot, { recursive: true });
if (!looksLikeArchive(asset.name.toLowerCase())) {
return { ok: false, error: `Unsupported archive type: ${asset.name}` };
}
try {
await extractSignalCliArchive(archivePath, installRoot, 60_000);
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
return {
ok: false,
error: `Failed to extract ${asset.name}: ${message}`,
};
}
const cliPath = await findSignalCliBinary(installRoot);
if (!cliPath) {
return {
ok: false,
error: `signal-cli binary not found after extracting ${asset.name}`,
};
}
await fs.chmod(cliPath, 0o755).catch(() => {});
return { ok: true, cliPath, version };
}
// ---------------------------------------------------------------------------
// Public entry point
// ---------------------------------------------------------------------------
export async function installSignalCli(runtime: RuntimeEnv): Promise<SignalInstallResult> {
if (process.platform === "win32") {
return {
ok: false,
error: "Signal CLI auto-install is not supported on Windows yet.",
};
}
// The official signal-cli GitHub releases only ship a native binary for
// x86-64 Linux. On other architectures (arm64, armv7, etc.) we delegate
// to Homebrew which builds from source and bundles the JRE automatically.
const hasNativeRelease = process.platform !== "linux" || process.arch === "x64";
if (hasNativeRelease) {
return installSignalCliFromRelease(runtime);
}
return installSignalCliViaBrew(runtime);
}
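// Usage sketch (illustrative only): a caller reports the outcome through the runtime.
//
//   const result = await installSignalCli(runtime);
//   if (result.ok) {
//     runtime.log(`signal-cli ${result.version ?? "?"} installed at ${result.cliPath}`);
//   } else {
//     runtime.error(result.error ?? "signal-cli install failed");
//   }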