Merge branch 'main' into fix/50713-ollama-tool-call-arguments

This commit is contained in:
LIU Yaohua 2026-03-20 10:44:33 +08:00 committed by GitHub
commit dd56822c71
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
175 changed files with 12069 additions and 4529 deletions

View File

@ -0,0 +1,71 @@
---
name: openclaw-test-heap-leaks
description: Investigate `pnpm test` memory growth, Vitest worker OOMs, and suspicious RSS increases in OpenClaw using the `scripts/test-parallel.mjs` heap snapshot tooling. Use when Codex needs to reproduce test-lane memory growth, collect repeated `.heapsnapshot` files, compare snapshots from the same worker PID, distinguish transformed-module retention from real data leaks, and fix or reduce the impact by patching cleanup logic or isolating hotspot tests.
---
# OpenClaw Test Heap Leaks
Use this skill for test-memory investigations. Do not guess from RSS alone when heap snapshots are available.
## Workflow
1. Reproduce the failing shape first.
- Match the real entrypoint if possible. For Linux CI-style unit failures, start with:
- `pnpm canvas:a2ui:bundle && OPENCLAW_TEST_MEMORY_TRACE=1 OPENCLAW_TEST_HEAPSNAPSHOT_INTERVAL_MS=60000 OPENCLAW_TEST_HEAPSNAPSHOT_DIR=.tmp/heapsnap OPENCLAW_TEST_WORKERS=2 OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144 pnpm test`
- Keep `OPENCLAW_TEST_MEMORY_TRACE=1` enabled so the wrapper prints per-file RSS summaries alongside the snapshots.
- If the report is about a specific shard or worker budget, preserve that shape.
2. Wait for repeated snapshots before concluding anything.
- Take at least two intervals from the same lane.
- Compare snapshots from the same PID inside one lane directory such as `.tmp/heapsnap/unit-fast/`.
- Use `scripts/heapsnapshot-delta.mjs` to compare either two files directly or the earliest/latest pair per PID in one lane directory.
3. Classify the growth before choosing a fix.
- If growth is dominated by Vite/Vitest transformed source strings, `Module`, `system / Context`, bytecode, descriptor arrays, or property maps, treat it as retained module graph growth in long-lived workers.
- If growth is dominated by app objects, caches, buffers, server handles, timers, mock state, sqlite state, or similar runtime objects, treat it as a likely cleanup or lifecycle leak.
4. Fix the right layer.
- For retained transformed-module growth in shared workers:
- Move hotspot files out of `unit-fast` by updating `test/fixtures/test-parallel.behavior.json`.
- Prefer `singletonIsolated` for files that are safe alone but inflate shared worker heaps.
- If the file should already have been peeled out by timings but is absent from `test/fixtures/test-timings.unit.json`, call that out explicitly. Missing timings are a scheduling blind spot.
- For real leaks:
- Patch the implicated test or runtime cleanup path.
- Look for missing `afterEach`/`afterAll`, module-reset gaps, retained global state, unreleased DB handles, or listeners/timers that survive the file.
5. Verify with the most direct proof.
- Re-run the targeted lane or file with heap snapshots enabled if the suite still finishes in reasonable time.
- If snapshot overhead pushes tests over Vitest timeouts, fall back to the same lane without snapshots and confirm the RSS trend or OOM is reduced.
- For wrapper-only changes, at minimum verify the expected lanes start and the snapshot files are written.
## Heuristics
- Do not call everything a leak. In this repo, large `unit-fast` growth can be a worker-lifetime problem rather than an application object leak.
- `scripts/test-parallel.mjs` and `scripts/test-parallel-memory.mjs` are the primary control points for wrapper diagnostics.
- The lane names printed by `[test-parallel] start ...` and `[test-parallel][mem] summary ...` tell you where to focus.
- When one or two files account for most of the delta and they are missing from timings, reducing impact by isolating them is usually the first pragmatic fix.
- When the same retained object families grow across multiple intervals in the same worker PID, trust the snapshots over intuition.
## Snapshot Comparison
- Direct comparison:
- `node .agents/skills/openclaw-test-heap-leaks/scripts/heapsnapshot-delta.mjs before.heapsnapshot after.heapsnapshot`
- Auto-select earliest/latest snapshots per PID within one lane:
- `node .agents/skills/openclaw-test-heap-leaks/scripts/heapsnapshot-delta.mjs --lane-dir .tmp/heapsnap/unit-fast`
- Useful flags:
- `--top 40`
- `--min-kb 32`
- `--pid 16133`
Read the top positive deltas first. Large positive growth in module-transform artifacts suggests lane isolation; large positive growth in runtime objects suggests a real leak.
## Output Expectations
When using this skill, report:
- The exact reproduce command.
- Which lane and PID were compared.
- The dominant retained object families from the snapshot delta.
- Whether the issue is a real leak or shared-worker retained module growth.
- The concrete fix or impact-reduction patch.
- What you verified, and what snapshot overhead prevented you from verifying.

View File

@ -0,0 +1,4 @@
interface:
display_name: "Test Heap Leaks"
short_description: "Investigate test OOMs with heap snapshots"
default_prompt: "Use $openclaw-test-heap-leaks to investigate test memory growth with heap snapshots and reduce its impact."

View File

@ -0,0 +1,265 @@
#!/usr/bin/env node
import fs from "node:fs";
import path from "node:path";
/** Print both supported invocation forms to stderr. */
function printUsage() {
  const usageLines = [
    "Usage: node heapsnapshot-delta.mjs <before.heapsnapshot> <after.heapsnapshot> [--top N] [--min-kb N]",
    " or: node heapsnapshot-delta.mjs --lane-dir <dir> [--pid PID] [--top N] [--min-kb N]",
  ];
  for (const usageLine of usageLines) {
    console.error(usageLine);
  }
}
/** Report a fatal error on stderr and terminate the process with status 1. */
function fail(message) {
  console.error(message);
  process.exit(1);
}
/**
 * Parse CLI arguments for the delta tool.
 *
 * Flags: --top N (row limit), --min-kb N (minimum growth filter),
 * --lane-dir <dir> (auto-pair mode), --pid PID (restrict auto-pair mode).
 * Every other token is collected as a positional snapshot file path.
 * Invalid numeric values abort via fail().
 */
function parseArgs(argv) {
  const options = { top: 30, minKb: 64, laneDir: null, pid: null, files: [] };
  let cursor = 0;
  // Missing flag values become "" so Number.parseInt yields NaN and the
  // validation below rejects them.
  const flagValue = () => argv[cursor + 1] ?? "";
  while (cursor < argv.length) {
    const token = argv[cursor];
    switch (token) {
      case "--top":
        options.top = Number.parseInt(flagValue(), 10);
        cursor += 2;
        break;
      case "--min-kb":
        options.minKb = Number.parseInt(flagValue(), 10);
        cursor += 2;
        break;
      case "--lane-dir":
        options.laneDir = argv[cursor + 1] ?? null;
        cursor += 2;
        break;
      case "--pid":
        options.pid = Number.parseInt(flagValue(), 10);
        cursor += 2;
        break;
      default:
        options.files.push(token);
        cursor += 1;
    }
  }
  if (!Number.isFinite(options.top) || options.top <= 0) {
    fail("--top must be a positive integer");
  }
  if (!Number.isFinite(options.minKb) || options.minKb < 0) {
    fail("--min-kb must be a non-negative integer");
  }
  if (options.pid !== null && (!Number.isInteger(options.pid) || options.pid <= 0)) {
    fail("--pid must be a positive integer");
  }
  return options;
}
/**
 * Parse a V8 heap snapshot filename of the form
 * `Heap.<YYYYMMDD.HHMMSS>.<pid>.0.<seq>.heapsnapshot`.
 *
 * Returns { filePath, pid, stamp, sequence } or null when the basename does
 * not match. The literal `.0.` segment is part of the expected naming scheme;
 * files with any other value there are rejected.
 */
function parseHeapFilename(filePath) {
  const pattern = /^Heap\.(?<stamp>\d{8}\.\d{6})\.(?<pid>\d+)\.0\.(?<seq>\d+)\.heapsnapshot$/u;
  const groups = path.basename(filePath).match(pattern)?.groups;
  if (!groups) {
    return null;
  }
  return {
    filePath,
    pid: Number.parseInt(groups.pid, 10),
    stamp: groups.stamp,
    sequence: Number.parseInt(groups.seq, 10),
  };
}
/**
 * Decide which two snapshot files to compare.
 *
 * In --lane-dir mode: scan the directory for snapshot files, group by PID,
 * and diff the earliest vs latest snapshot — for the requested --pid, or for
 * the PID with the most snapshots (ties broken by lowest PID). Otherwise
 * exactly two positional files are required. Aborts via fail()/printUsage()
 * when no usable pair exists.
 */
function resolvePair(options) {
  if (!options.laneDir) {
    if (options.files.length !== 2) {
      printUsage();
      process.exit(1);
    }
    const [beforeFile, afterFile] = options.files;
    return { before: beforeFile, after: afterFile, pid: null, snapshotCount: 2 };
  }
  const entries = [];
  for (const name of fs.readdirSync(options.laneDir)) {
    const parsed = parseHeapFilename(path.join(options.laneDir, name));
    if (parsed !== null && (options.pid === null || parsed.pid === options.pid)) {
      entries.push(parsed);
    }
  }
  // Chronological order within each PID: timestamp first, then sequence.
  entries.sort(
    (a, b) => a.pid - b.pid || a.stamp.localeCompare(b.stamp) || a.sequence - b.sequence,
  );
  if (entries.length === 0) {
    fail(`No matching heap snapshots found in ${options.laneDir}`);
  }
  const byPid = new Map();
  for (const entry of entries) {
    if (!byPid.has(entry.pid)) {
      byPid.set(entry.pid, []);
    }
    byPid.get(entry.pid).push(entry);
  }
  // A PID needs at least two snapshots to form a before/after pair.
  const candidates = [];
  for (const group of byPid.values()) {
    if (group.length >= 2) {
      candidates.push({
        pid: group[0].pid,
        before: group[0],
        after: group[group.length - 1],
        count: group.length,
      });
    }
  }
  if (candidates.length === 0) {
    fail(`Need at least two snapshots for one PID in ${options.laneDir}`);
  }
  let chosen = null;
  if (options.pid !== null) {
    chosen = candidates.find((candidate) => candidate.pid === options.pid) ?? null;
  } else {
    candidates.sort((a, b) => b.count - a.count || a.pid - b.pid);
    chosen = candidates[0];
  }
  if (!chosen) {
    fail(`No PID with at least two snapshots matched in ${options.laneDir}`);
  }
  return {
    before: chosen.before.filePath,
    after: chosen.after.filePath,
    pid: chosen.pid,
    snapshotCount: chosen.count,
  };
}
/**
 * Read one .heapsnapshot file and aggregate node self-sizes.
 *
 * Groups every heap node by the "type\tname" pair and accumulates the total
 * self_size and node count per group. Aborts via fail() when the file has no
 * snapshot.meta section. Note: the entire snapshot JSON is parsed in memory.
 */
function loadSummary(filePath) {
  const snapshot = JSON.parse(fs.readFileSync(filePath, "utf8"));
  const meta = snapshot.snapshot?.meta;
  if (!meta) {
    fail(`Invalid heap snapshot: ${filePath}`);
  }
  // The nodes array is a flat table; meta.node_fields gives column layout.
  const fieldCount = meta.node_fields.length;
  const typeColumn = meta.node_fields.indexOf("type");
  const nameColumn = meta.node_fields.indexOf("name");
  const sizeColumn = meta.node_fields.indexOf("self_size");
  const typeNames = meta.node_types[0];
  const { nodes, strings } = snapshot;
  const summary = new Map();
  for (let base = 0; base < nodes.length; base += fieldCount) {
    const type = typeNames[nodes[base + typeColumn]];
    const name = strings[nodes[base + nameColumn]];
    const key = `${type}\t${name}`;
    let bucket = summary.get(key);
    if (!bucket) {
      bucket = { type, name, selfSize: 0, count: 0 };
      summary.set(key, bucket);
    }
    bucket.selfSize += nodes[base + sizeColumn];
    bucket.count += 1;
  }
  return { nodeCount: snapshot.snapshot.node_count, summary };
}
/** Render a byte count as B, KiB, or MiB, picking the unit by magnitude. */
function formatBytes(bytes) {
  const magnitude = Math.abs(bytes);
  if (magnitude >= 1048576) {
    return `${(bytes / 1048576).toFixed(2)} MiB`;
  }
  return magnitude >= 1024 ? `${(bytes / 1024).toFixed(1)} KiB` : `${bytes} B`;
}
/** Format a signed byte delta with an explicit "+" or "-" prefix. */
function formatDelta(bytes) {
  const sign = bytes < 0 ? "-" : "+";
  return `${sign}${formatBytes(Math.abs(bytes))}`;
}
/**
 * Clamp display text to at most maxLength characters.
 *
 * Over-long text keeps its first maxLength - 1 characters and gains a "…"
 * marker, so the result is exactly maxLength characters and truncation is
 * visible. The original sliced to maxLength - 1 inside a no-op template
 * literal and appended nothing, silently dropping a character with no
 * truncation indicator.
 */
function truncate(text, maxLength) {
  return text.length <= maxLength ? text : `${text.slice(0, maxLength - 1)}…`;
}
/**
 * Entry point: resolve the snapshot pair, diff per-(type, name) self-sizes,
 * and print the largest growth entries.
 *
 * Only keys present in the "after" snapshot are considered, and only those
 * whose size delta meets the --min-kb threshold — the tool reports growth,
 * not shrinkage.
 */
function main() {
  const options = parseArgs(process.argv.slice(2));
  const pair = resolvePair(options);
  const beforeSummary = loadSummary(pair.before);
  const afterSummary = loadSummary(pair.after);
  const thresholdBytes = options.minKb * 1024;
  const rows = [];
  for (const [key, afterEntry] of afterSummary.summary) {
    // Keys absent before count as starting from zero size/count.
    const beforeEntry = beforeSummary.summary.get(key) ?? { selfSize: 0, count: 0 };
    const sizeDelta = afterEntry.selfSize - beforeEntry.selfSize;
    if (sizeDelta < thresholdBytes) {
      continue;
    }
    rows.push({
      type: afterEntry.type,
      name: afterEntry.name,
      sizeDelta,
      countDelta: afterEntry.count - beforeEntry.count,
      afterSize: afterEntry.selfSize,
      afterCount: afterEntry.count,
    });
  }
  // Largest size growth first; ties broken by count growth.
  rows.sort((a, b) => b.sizeDelta - a.sizeDelta || b.countDelta - a.countDelta);
  console.log(`before: ${pair.before}`);
  console.log(`after: ${pair.after}`);
  if (pair.pid !== null) {
    console.log(`pid: ${pair.pid} (${pair.snapshotCount} snapshots found)`);
  }
  const nodeDelta = afterSummary.nodeCount - beforeSummary.nodeCount;
  console.log(
    `nodes: ${beforeSummary.nodeCount} -> ${afterSummary.nodeCount} (${nodeDelta >= 0 ? "+" : ""}${nodeDelta})`,
  );
  console.log(`filter: top=${options.top} min=${options.minKb} KiB`);
  console.log("");
  if (rows.length === 0) {
    console.log("No entries exceeded the minimum delta.");
    return;
  }
  for (const row of rows.slice(0, options.top)) {
    const columns = [
      formatDelta(row.sizeDelta).padStart(11),
      `count ${row.countDelta >= 0 ? "+" : ""}${row.countDelta}`.padStart(10),
      row.type.padEnd(16),
      truncate(row.name || "(empty)", 96),
    ];
    console.log(columns.join(" "));
  }
}
main();

View File

@ -1,7 +1,7 @@
.git
.worktrees
# Sensitive files docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN
# Sensitive files scripts/docker/setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN
# into the project root; keep it out of the build context.
.env
.env.*

3
.github/labeler.yml vendored
View File

@ -165,7 +165,10 @@
- "Dockerfile.*"
- "docker-compose.yml"
- "docker-setup.sh"
- "setup-podman.sh"
- ".dockerignore"
- "scripts/docker/setup.sh"
- "scripts/podman/setup.sh"
- "scripts/**/*docker*"
- "scripts/**/Dockerfile*"
- "scripts/sandbox-*.sh"

View File

@ -70,9 +70,8 @@
- Format check: `pnpm format` (oxfmt --check)
- Format fix: `pnpm format:fix` (oxfmt --write)
- Tests: `pnpm test` (vitest); coverage: `pnpm test:coverage`
- Default landing bar: before any commit, run `pnpm check` and prefer a passing result for the change being committed.
- For narrowly scoped changes, run narrowly scoped tests that directly validate the touched behavior; this is required proof for the change before commit and push decisions. If no meaningful scoped test exists, say so explicitly and use the next most direct validation available.
- Default landing bar: before any push to `main`, run `pnpm check` and `pnpm test` and prefer a green result.
- For narrowly scoped changes, prefer narrowly scoped tests that directly validate the touched behavior. If no meaningful scoped test exists, say so explicitly and use the next most direct validation available.
- Preferred landing bar for pushes to `main`: `pnpm check` and `pnpm test`, with a green result when feasible.
- Scoped tests prove the change itself. `pnpm test` remains the default `main` landing bar; scoped tests do not replace full-suite gates by default.
- Hard gate: if the change can affect build output, packaging, lazy-loading/module boundaries, or published surfaces, `pnpm build` MUST be run and MUST pass before pushing `main`.
- Default rule: do not commit or push with failing format, lint, type, build, or required test checks when those failures are caused by the change or plausibly related to the touched surface.
@ -82,7 +81,7 @@
## Coding Style & Naming Conventions
- Language: TypeScript (ESM). Prefer strict typing; avoid `any`.
- Formatting/linting via Oxlint and Oxfmt; run `pnpm check` before commits.
- Formatting/linting via Oxlint and Oxfmt.
- Never add `@ts-nocheck` and do not disable `no-explicit-any`; fix root causes and update Oxlint/Oxfmt config only when required.
- Dynamic import guardrail: do not mix `await import("x")` and static `import ... from "x"` for the same module in production code paths. If you need lazy loading, create a dedicated `*.runtime.ts` boundary (that re-exports from `x`) and dynamically import that boundary from lazy callers only.
- Dynamic import verification: after refactors that touch lazy-loading/module boundaries, run `pnpm build` and check for `[INEFFECTIVE_DYNAMIC_IMPORT]` warnings before submitting.

View File

@ -44,6 +44,7 @@ Docs: https://docs.openclaw.ai
- Control UI/chat: add an expand-to-canvas button on assistant chat bubbles and in-app session navigation from Sessions and Cron views. Thanks @BunsDev.
- Plugins/context engines: expose `delegateCompactionToRuntime(...)` on the public plugin SDK, refactor the legacy engine to use the shared helper, and clarify `ownsCompaction` delegation semantics for non-owning engines. (#49061) Thanks @jalehman.
- Plugins/MiniMax: add MiniMax-M2.7 and MiniMax-M2.7-highspeed models and update the default model from M2.5 to M2.7. (#49691) Thanks @liyuan97.
- Plugins/Xiaomi: switch the bundled Xiaomi provider to the `/v1` OpenAI-compatible endpoint and add MiMo V2 Pro plus MiMo V2 Omni to the built-in catalog. (#49214) Thanks @DJjjjhao.
### Fixes
@ -139,6 +140,7 @@ Docs: https://docs.openclaw.ai
- Discord: enforce strict DM component allowlist auth (#49997) Thanks @joshavant.
- Stabilize plugin loader and Docker extension smoke (#50058) Thanks @joshavant.
- Telegram: stabilize pairing/session/forum routing and reply formatting tests (#50155) Thanks @joshavant.
- Hardening: refresh stale device pairing requests and pending metadata (#50695) Thanks @smaeljaish771 and @joshavant.
### Fixes
@ -166,6 +168,8 @@ Docs: https://docs.openclaw.ai
- Plugins/WhatsApp: share split-load singleton state for plugin command registration and active WhatsApp listeners so duplicate module graphs no longer lose native plugin commands or outbound listener state. (#50418) Thanks @huntharo.
- Onboarding/custom providers: keep Azure AI Foundry `*.services.ai.azure.com` custom endpoints on the selected compatibility path instead of forcing Responses, so chat-completions Foundry models still work after setup. Fixes #50528. (#50535) Thanks @obviyus.
- Plugins/update: let `openclaw plugins update <npm-spec>` target tracked npm installs by dist-tag or exact version, and preserve the recorded npm spec for later id-based updates. (#49998) Thanks @huntharo.
- Tests/CLI: reduce command-secret gateway test import pressure while keeping the real protocol payload validator in place, so the isolated lane no longer carries the heavier runtime-web and message-channel graphs. (#50663) Thanks @huntharo.
- Gateway/plugins: share plugin interactive callback routing and plugin bind approval state across duplicate module graphs so Telegram Codex picker buttons and plugin bind approvals no longer fall through to normal inbound message routing. (#50722) Thanks @huntharo.
### Breaking
@ -178,6 +182,7 @@ Docs: https://docs.openclaw.ai
- Plugins/message discovery: require `ChannelMessageActionAdapter.describeMessageTool(...)` for shared `message` tool discovery. The legacy `listActions`, `getCapabilities`, and `getToolSchema` adapter methods are removed. Plugin authors should migrate message discovery to `describeMessageTool(...)` and keep channel-specific action runtime code inside the owning plugin package. Thanks @gumadeiras.
- Exec/env sandbox: block build-tool JVM injection (`MAVEN_OPTS`, `SBT_OPTS`, `GRADLE_OPTS`, `ANT_OPTS`), glibc tunable exploitation (`GLIBC_TUNABLES`), and .NET dependency resolution hijack (`DOTNET_ADDITIONAL_DEPS`) from the host exec environment, and restrict Gradle init script redirect (`GRADLE_USER_HOME`) as an override-only block so user-configured Gradle homes still propagate. (#49702)
- Plugins/Matrix: add a new Matrix plugin backed by the official `matrix-js-sdk`. If you are upgrading from the previous public Matrix plugin, follow the migration guide: https://docs.openclaw.ai/install/migrating-matrix Thanks @gumadeiras.
- Discord/commands: switch native command deployment to Carbon reconcile by default so Discord restarts stop churning slash commands through OpenClaw's local deploy path. (#46597) Thanks @huntharo and @thewilloftheshadow.
## 2026.3.13

View File

@ -16,7 +16,7 @@ services:
## Uncomment the lines below to enable sandbox isolation
## (agents.defaults.sandbox). Requires Docker CLI in the image
## (build with --build-arg OPENCLAW_INSTALL_DOCKER_CLI=1) or use
## docker-setup.sh with OPENCLAW_SANDBOX=1 for automated setup.
## scripts/docker/setup.sh with OPENCLAW_SANDBOX=1 for automated setup.
## Set DOCKER_GID to the host's docker group GID (run: stat -c '%g' /var/run/docker.sock).
# - /var/run/docker.sock:/var/run/docker.sock
# group_add:

View File

@ -2,615 +2,11 @@
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail
# Resolve the repository root from this script's own location.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
# Optional overlay generated by write_extra_compose for extra volume mounts.
EXTRA_COMPOSE_FILE="$ROOT_DIR/docker-compose.extra.yml"
IMAGE_NAME="${OPENCLAW_IMAGE:-openclaw:local}"
# Comma-separated "source:target[:options]" mount specs; validated later.
EXTRA_MOUNTS="${OPENCLAW_EXTRA_MOUNTS:-}"
HOME_VOLUME_NAME="${OPENCLAW_HOME_VOLUME:-}"
# Raw user setting; normalized into SANDBOX_ENABLED ("1" or "") further down.
RAW_SANDBOX_SETTING="${OPENCLAW_SANDBOX:-}"
SANDBOX_ENABLED=""
DOCKER_SOCKET_PATH="${OPENCLAW_DOCKER_SOCKET:-}"
TIMEZONE="${OPENCLAW_TZ:-}"
# Relocated setup script path; its existence is checked further down.
SCRIPT_PATH="$ROOT_DIR/scripts/docker/setup.sh"
# Print a fatal error to stderr and stop the script.
fail() {
  printf 'ERROR: %s\n' "$*" >&2
  exit 1
}
# Ensure an executable named $1 is on PATH; exit with a message otherwise.
require_cmd() {
  if command -v "$1" >/dev/null 2>&1; then
    return 0
  fi
  echo "Missing dependency: $1" >&2
  exit 1
}
# Interpret common boolean-ish strings (1/true/yes/on, any case) as true.
is_truthy_value() {
  local normalized
  normalized="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')"
  case "$normalized" in
    1 | true | yes | on)
      return 0
      ;;
  esac
  return 1
}
# Read gateway.auth.token from $OPENCLAW_CONFIG_DIR/openclaw.json, if present.
# Prefers python3 and falls back to node; prints nothing when the config file
# is missing, unparsable, or has no non-empty token. Always exits 0 so `set -e`
# callers stay safe.
# NOTE(review): leading indentation in this rendering appears stripped; the
# heredoc bodies below (especially the Python one) must be verified against
# the original file before this text is reused verbatim.
read_config_gateway_token() {
local config_path="$OPENCLAW_CONFIG_DIR/openclaw.json"
if [[ ! -f "$config_path" ]]; then
return 0
fi
if command -v python3 >/dev/null 2>&1; then
python3 - "$config_path" <<'PY'
import json
import sys
path = sys.argv[1]
try:
with open(path, "r", encoding="utf-8") as f:
cfg = json.load(f)
except Exception:
raise SystemExit(0)
gateway = cfg.get("gateway")
if not isinstance(gateway, dict):
raise SystemExit(0)
auth = gateway.get("auth")
if not isinstance(auth, dict):
raise SystemExit(0)
token = auth.get("token")
if isinstance(token, str):
token = token.strip()
if token:
print(token)
PY
return 0
fi
if command -v node >/dev/null 2>&1; then
node - "$config_path" <<'NODE'
const fs = require("node:fs");
const configPath = process.argv[2];
try {
const cfg = JSON.parse(fs.readFileSync(configPath, "utf8"));
const token = cfg?.gateway?.auth?.token;
if (typeof token === "string" && token.trim().length > 0) {
process.stdout.write(token.trim());
}
} catch {
// Keep docker-setup resilient when config parsing fails.
}
NODE
fi
}
# Extract the OPENCLAW_GATEWAY_TOKEN=... value from an env file ($1).
# The last occurrence wins; trailing carriage returns are stripped so CRLF
# files work. Prints nothing (and still returns 0) when the file is missing
# or the key is absent/empty.
read_env_gateway_token() {
  local env_path="$1"
  local current=""
  local value=""
  if [[ ! -f "$env_path" ]]; then
    return 0
  fi
  # `|| [[ -n ... ]]` keeps a final line without a trailing newline.
  while IFS= read -r current || [[ -n "$current" ]]; do
    current="${current%$'\r'}"
    case "$current" in
      OPENCLAW_GATEWAY_TOKEN=*)
        value="${current#OPENCLAW_GATEWAY_TOKEN=}"
        ;;
    esac
  done <"$env_path"
  if [[ -n "$value" ]]; then
    printf '%s' "$value"
  fi
  return 0
}
# Ensure gateway.controlUi.allowedOrigins is populated when the gateway binds
# beyond loopback. Any existing non-empty allowlist is left untouched;
# otherwise a single http://127.0.0.1:<port> origin is set via the
# openclaw-cli container.
ensure_control_ui_allowed_origins() {
# Loopback binds need no allowlist; nothing to do.
if [[ "${OPENCLAW_GATEWAY_BIND}" == "loopback" ]]; then
return 0
fi
local allowed_origin_json
local current_allowed_origins
allowed_origin_json="$(printf '["http://127.0.0.1:%s"]' "$OPENCLAW_GATEWAY_PORT")"
# Read the current value; CLI failures are tolerated and treated as "unset".
current_allowed_origins="$(
docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
config get gateway.controlUi.allowedOrigins 2>/dev/null || true
)"
# Strip carriage returns so Windows/TTY output compares cleanly below.
current_allowed_origins="${current_allowed_origins//$'\r'/}"
if [[ -n "$current_allowed_origins" && "$current_allowed_origins" != "null" && "$current_allowed_origins" != "[]" ]]; then
echo "Control UI allowlist already configured; leaving gateway.controlUi.allowedOrigins unchanged."
return 0
fi
docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
config set gateway.controlUi.allowedOrigins "$allowed_origin_json" --strict-json >/dev/null
echo "Set gateway.controlUi.allowedOrigins to $allowed_origin_json for non-loopback bind."
}
# Pin gateway.mode=local and gateway.bind=$OPENCLAW_GATEWAY_BIND in the
# container config via the openclaw-cli service, then log what was set.
sync_gateway_mode_and_bind() {
docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
config set gateway.mode local >/dev/null
docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
config set gateway.bind "$OPENCLAW_GATEWAY_BIND" >/dev/null
echo "Pinned gateway.mode=local and gateway.bind=$OPENCLAW_GATEWAY_BIND for Docker setup."
}
# True (status 0) when the value embeds a newline, carriage return, or tab.
contains_disallowed_chars() {
  case "$1" in
    *$'\n'* | *$'\r'* | *$'\t'*)
      return 0
      ;;
  esac
  return 1
}
# A timezone is valid when it names an entry in the host zoneinfo database
# that is a file (directories like "America" alone are not timezones).
is_valid_timezone() {
  local zone_path="/usr/share/zoneinfo/$1"
  [[ -e "$zone_path" && ! -d "$zone_path" ]]
}
# Reject mount path values ($2, labeled $1 in errors) that are empty, contain
# control characters, or contain any whitespace. Aborts via fail().
validate_mount_path_value() {
  local label="$1"
  local value="$2"
  if [[ -z "$value" ]]; then
    fail "$label cannot be empty."
  fi
  if contains_disallowed_chars "$value"; then
    fail "$label contains unsupported control characters."
  fi
  case "$value" in
    *[[:space:]]*)
      fail "$label cannot contain whitespace."
      ;;
  esac
}
# Named Docker volumes must start with an alphanumeric and contain only
# alphanumerics, underscore, dot, or dash. Aborts via fail() otherwise.
validate_named_volume() {
  if [[ ! "$1" =~ ^[A-Za-z0-9][A-Za-z0-9_.-]*$ ]]; then
    fail "OPENCLAW_HOME_VOLUME must match [A-Za-z0-9][A-Za-z0-9_.-]* when using a named volume."
  fi
}
# Validate one "source:target[:options]" mount spec before it is written into
# generated compose YAML. The regex is deliberately strict (no spaces, commas,
# or extra colons) to prevent YAML structure injection via crafted specs.
validate_mount_spec() {
local mount="$1"
if contains_disallowed_chars "$mount"; then
fail "OPENCLAW_EXTRA_MOUNTS entries cannot contain control characters."
fi
# Keep mount specs strict to avoid YAML structure injection.
# Expected format: source:target[:options]
if [[ ! "$mount" =~ ^[^[:space:],:]+:[^[:space:],:]+(:[^[:space:],:]+)?$ ]]; then
fail "Invalid mount format '$mount'. Expected source:target[:options] without spaces."
fi
}
# --- Preflight: docker availability ----------------------------------------
require_cmd docker
# NOTE(review): the next `if` appears unbalanced in this hunk — the compose
# check opens a block whose matching `fi` is not visible here, and the nested
# SCRIPT_PATH check reads like part of a delegating wrapper merged into this
# view. Confirm the control flow against the full script before relying on it.
if ! docker compose version >/dev/null 2>&1; then
echo "Docker Compose not available (try: docker compose version)" >&2
if [[ ! -f "$SCRIPT_PATH" ]]; then
echo "Docker setup script not found at $SCRIPT_PATH" >&2
exit 1
fi
# Derive the Docker socket path: explicit env var wins, then a unix://
# DOCKER_HOST, then the conventional default.
if [[ -z "$DOCKER_SOCKET_PATH" && "${DOCKER_HOST:-}" == unix://* ]]; then
DOCKER_SOCKET_PATH="${DOCKER_HOST#unix://}"
fi
if [[ -z "$DOCKER_SOCKET_PATH" ]]; then
DOCKER_SOCKET_PATH="/var/run/docker.sock"
fi
# Normalize OPENCLAW_SANDBOX (1/true/yes/on) into a simple "1"/"" flag.
if is_truthy_value "$RAW_SANDBOX_SETTING"; then
SANDBOX_ENABLED="1"
fi
# Default config/workspace locations under $HOME; validate as mount sources.
OPENCLAW_CONFIG_DIR="${OPENCLAW_CONFIG_DIR:-$HOME/.openclaw}"
OPENCLAW_WORKSPACE_DIR="${OPENCLAW_WORKSPACE_DIR:-$HOME/.openclaw/workspace}"
validate_mount_path_value "OPENCLAW_CONFIG_DIR" "$OPENCLAW_CONFIG_DIR"
validate_mount_path_value "OPENCLAW_WORKSPACE_DIR" "$OPENCLAW_WORKSPACE_DIR"
# A home volume containing "/" is treated as a bind-mount path; otherwise it
# must be a well-formed named Docker volume.
if [[ -n "$HOME_VOLUME_NAME" ]]; then
if [[ "$HOME_VOLUME_NAME" == *"/"* ]]; then
validate_mount_path_value "OPENCLAW_HOME_VOLUME" "$HOME_VOLUME_NAME"
else
validate_named_volume "$HOME_VOLUME_NAME"
fi
fi
if contains_disallowed_chars "$EXTRA_MOUNTS"; then
fail "OPENCLAW_EXTRA_MOUNTS cannot contain control characters."
fi
# The docker socket is only mounted in sandbox mode, so only validate it then.
if [[ -n "$SANDBOX_ENABLED" ]]; then
validate_mount_path_value "OPENCLAW_DOCKER_SOCKET" "$DOCKER_SOCKET_PATH"
fi
# Timezone must be a plausible IANA name AND exist in the host zoneinfo db.
if [[ -n "$TIMEZONE" ]]; then
if contains_disallowed_chars "$TIMEZONE"; then
fail "OPENCLAW_TZ contains unsupported control characters."
fi
if [[ ! "$TIMEZONE" =~ ^[A-Za-z0-9/_+\-]+$ ]]; then
fail "OPENCLAW_TZ must be a valid IANA timezone string (e.g. Asia/Shanghai)."
fi
if ! is_valid_timezone "$TIMEZONE"; then
fail "OPENCLAW_TZ must match a timezone in /usr/share/zoneinfo (e.g. Asia/Shanghai)."
fi
fi
mkdir -p "$OPENCLAW_CONFIG_DIR"
mkdir -p "$OPENCLAW_WORKSPACE_DIR"
# Seed directory tree eagerly so bind mounts work even on Docker Desktop/Windows
# where the container (even as root) cannot create new host subdirectories.
mkdir -p "$OPENCLAW_CONFIG_DIR/identity"
mkdir -p "$OPENCLAW_CONFIG_DIR/agents/main/agent"
mkdir -p "$OPENCLAW_CONFIG_DIR/agents/main/sessions"
# Export everything the compose files interpolate.
export OPENCLAW_CONFIG_DIR
export OPENCLAW_WORKSPACE_DIR
export OPENCLAW_GATEWAY_PORT="${OPENCLAW_GATEWAY_PORT:-18789}"
export OPENCLAW_BRIDGE_PORT="${OPENCLAW_BRIDGE_PORT:-18790}"
export OPENCLAW_GATEWAY_BIND="${OPENCLAW_GATEWAY_BIND:-lan}"
export OPENCLAW_IMAGE="$IMAGE_NAME"
export OPENCLAW_DOCKER_APT_PACKAGES="${OPENCLAW_DOCKER_APT_PACKAGES:-}"
export OPENCLAW_EXTENSIONS="${OPENCLAW_EXTENSIONS:-}"
export OPENCLAW_EXTRA_MOUNTS="$EXTRA_MOUNTS"
export OPENCLAW_HOME_VOLUME="$HOME_VOLUME_NAME"
export OPENCLAW_ALLOW_INSECURE_PRIVATE_WS="${OPENCLAW_ALLOW_INSECURE_PRIVATE_WS:-}"
export OPENCLAW_SANDBOX="$SANDBOX_ENABLED"
export OPENCLAW_DOCKER_SOCKET="$DOCKER_SOCKET_PATH"
export OPENCLAW_TZ="$TIMEZONE"
# Detect Docker socket GID for sandbox group_add.
# `stat -c` is GNU/Linux, `stat -f` is the BSD/macOS fallback.
DOCKER_GID=""
if [[ -n "$SANDBOX_ENABLED" && -S "$DOCKER_SOCKET_PATH" ]]; then
DOCKER_GID="$(stat -c '%g' "$DOCKER_SOCKET_PATH" 2>/dev/null || stat -f '%g' "$DOCKER_SOCKET_PATH" 2>/dev/null || echo "")"
fi
export DOCKER_GID
# Gateway token precedence: explicit env var > openclaw.json > .env > freshly
# generated random token (openssl preferred, python3 fallback).
if [[ -z "${OPENCLAW_GATEWAY_TOKEN:-}" ]]; then
EXISTING_CONFIG_TOKEN="$(read_config_gateway_token || true)"
if [[ -n "$EXISTING_CONFIG_TOKEN" ]]; then
OPENCLAW_GATEWAY_TOKEN="$EXISTING_CONFIG_TOKEN"
echo "Reusing gateway token from $OPENCLAW_CONFIG_DIR/openclaw.json"
else
DOTENV_GATEWAY_TOKEN="$(read_env_gateway_token "$ROOT_DIR/.env" || true)"
if [[ -n "$DOTENV_GATEWAY_TOKEN" ]]; then
OPENCLAW_GATEWAY_TOKEN="$DOTENV_GATEWAY_TOKEN"
echo "Reusing gateway token from $ROOT_DIR/.env"
elif command -v openssl >/dev/null 2>&1; then
OPENCLAW_GATEWAY_TOKEN="$(openssl rand -hex 32)"
else
OPENCLAW_GATEWAY_TOKEN="$(python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
)"
fi
fi
fi
export OPENCLAW_GATEWAY_TOKEN
# Compose file list starts with the base file; overlays are appended below.
COMPOSE_FILES=("$COMPOSE_FILE")
COMPOSE_ARGS=()
# Generate $EXTRA_COMPOSE_FILE with additional volume mounts for BOTH the
# openclaw-gateway and openclaw-cli services.
# $1: home volume (named volume or bind path; empty skips the home mounts)
# remaining args: "source:target[:options]" mount specs (each re-validated)
# NOTE(review): indentation inside the heredocs and the printf format strings
# appears stripped in this rendering; the generated YAML depends on exact
# leading spaces, so verify both against the original file.
write_extra_compose() {
local home_volume="$1"
shift
local mount
local gateway_home_mount
local gateway_config_mount
local gateway_workspace_mount
# Start the overlay with the gateway service's volume list header.
cat >"$EXTRA_COMPOSE_FILE" <<'YAML'
services:
openclaw-gateway:
volumes:
YAML
if [[ -n "$home_volume" ]]; then
# Home volume plus the config/workspace bind mounts layered over it.
gateway_home_mount="${home_volume}:/home/node"
gateway_config_mount="${OPENCLAW_CONFIG_DIR}:/home/node/.openclaw"
gateway_workspace_mount="${OPENCLAW_WORKSPACE_DIR}:/home/node/.openclaw/workspace"
validate_mount_spec "$gateway_home_mount"
validate_mount_spec "$gateway_config_mount"
validate_mount_spec "$gateway_workspace_mount"
printf ' - %s\n' "$gateway_home_mount" >>"$EXTRA_COMPOSE_FILE"
printf ' - %s\n' "$gateway_config_mount" >>"$EXTRA_COMPOSE_FILE"
printf ' - %s\n' "$gateway_workspace_mount" >>"$EXTRA_COMPOSE_FILE"
fi
for mount in "$@"; do
validate_mount_spec "$mount"
printf ' - %s\n' "$mount" >>"$EXTRA_COMPOSE_FILE"
done
# Repeat the same mount list for the CLI service.
cat >>"$EXTRA_COMPOSE_FILE" <<'YAML'
openclaw-cli:
volumes:
YAML
if [[ -n "$home_volume" ]]; then
printf ' - %s\n' "$gateway_home_mount" >>"$EXTRA_COMPOSE_FILE"
printf ' - %s\n' "$gateway_config_mount" >>"$EXTRA_COMPOSE_FILE"
printf ' - %s\n' "$gateway_workspace_mount" >>"$EXTRA_COMPOSE_FILE"
fi
for mount in "$@"; do
validate_mount_spec "$mount"
printf ' - %s\n' "$mount" >>"$EXTRA_COMPOSE_FILE"
done
# Named volumes (no "/") additionally need a top-level volumes: declaration.
if [[ -n "$home_volume" && "$home_volume" != *"/"* ]]; then
validate_named_volume "$home_volume"
cat >>"$EXTRA_COMPOSE_FILE" <<YAML
volumes:
${home_volume}:
YAML
fi
}
# When sandbox is requested, ensure Docker CLI build arg is set for local builds.
# Docker socket mount is deferred until sandbox prerequisites are verified.
if [[ -n "$SANDBOX_ENABLED" ]]; then
if [[ -z "${OPENCLAW_INSTALL_DOCKER_CLI:-}" ]]; then
export OPENCLAW_INSTALL_DOCKER_CLI=1
fi
fi
# Split OPENCLAW_EXTRA_MOUNTS on commas, trim surrounding whitespace from each
# entry, and drop empties.
VALID_MOUNTS=()
if [[ -n "$EXTRA_MOUNTS" ]]; then
IFS=',' read -r -a mounts <<<"$EXTRA_MOUNTS"
for mount in "${mounts[@]}"; do
mount="${mount#"${mount%%[![:space:]]*}"}"
mount="${mount%"${mount##*[![:space:]]}"}"
if [[ -n "$mount" ]]; then
VALID_MOUNTS+=("$mount")
fi
done
fi
# Only generate the extra-compose overlay when there is something to mount.
if [[ -n "$HOME_VOLUME_NAME" || ${#VALID_MOUNTS[@]} -gt 0 ]]; then
# Bash 3.2 + nounset treats "${array[@]}" on an empty array as unbound.
if [[ ${#VALID_MOUNTS[@]} -gt 0 ]]; then
write_extra_compose "$HOME_VOLUME_NAME" "${VALID_MOUNTS[@]}"
else
write_extra_compose "$HOME_VOLUME_NAME"
fi
COMPOSE_FILES+=("$EXTRA_COMPOSE_FILE")
fi
# Expand the file list into repeated -f arguments for docker compose.
for compose_file in "${COMPOSE_FILES[@]}"; do
COMPOSE_ARGS+=("-f" "$compose_file")
done
# Keep a base compose arg set without sandbox overlay so rollback paths can
# force a known-safe gateway service definition (no docker.sock mount).
BASE_COMPOSE_ARGS=("${COMPOSE_ARGS[@]}")
# Human-readable compose invocation for log/help output.
COMPOSE_HINT="docker compose"
for compose_file in "${COMPOSE_FILES[@]}"; do
COMPOSE_HINT+=" -f ${compose_file}"
done
ENV_FILE="$ROOT_DIR/.env"
# upsert_env <file> <key> [key ...]
# Insert-or-update KEY=VALUE lines in an env file, one per named shell
# variable. Lines whose key matches are rewritten in place with the
# variable's current value; keys not present are appended at the end.
# All other lines (comments, unrelated keys) pass through untouched.
# Portable to Bash 3.2 (macOS default): handled keys are tracked in a
# space-delimited string because `declare -A` is unavailable there.
upsert_env() {
  local target="$1"
  shift
  local -a names=("$@")
  local scratch
  scratch="$(mktemp)"
  local handled=" "
  local entry name matched
  if [[ -f "$target" ]]; then
    # `|| [[ -n ... ]]` keeps a final line without a trailing newline.
    while IFS= read -r entry || [[ -n "$entry" ]]; do
      matched=""
      for name in "${names[@]}"; do
        [[ "${entry%%=*}" != "$name" ]] && continue
        # Rewrite with the variable's current value (indirect expansion;
        # `-` default keeps nounset-safe when the variable is unset).
        printf '%s=%s\n' "$name" "${!name-}" >>"$scratch"
        handled="${handled}${name} "
        matched=1
        break
      done
      if [[ -z "$matched" ]]; then
        printf '%s\n' "$entry" >>"$scratch"
      fi
    done <"$target"
  fi
  # Append requested keys that never appeared in the file.
  for name in "${names[@]}"; do
    case "$handled" in
      *" ${name} "*) ;;
      *) printf '%s=%s\n' "$name" "${!name-}" >>"$scratch" ;;
    esac
  done
  mv "$scratch" "$target"
}
# Persist the effective configuration into .env so reruns and plain
# `docker compose` invocations reuse the same values.
upsert_env "$ENV_FILE" \
  OPENCLAW_CONFIG_DIR \
  OPENCLAW_WORKSPACE_DIR \
  OPENCLAW_GATEWAY_PORT \
  OPENCLAW_BRIDGE_PORT \
  OPENCLAW_GATEWAY_BIND \
  OPENCLAW_GATEWAY_TOKEN \
  OPENCLAW_IMAGE \
  OPENCLAW_EXTRA_MOUNTS \
  OPENCLAW_HOME_VOLUME \
  OPENCLAW_DOCKER_APT_PACKAGES \
  OPENCLAW_EXTENSIONS \
  OPENCLAW_SANDBOX \
  OPENCLAW_DOCKER_SOCKET \
  DOCKER_GID \
  OPENCLAW_INSTALL_DOCKER_CLI \
  OPENCLAW_ALLOW_INSECURE_PRIVATE_WS \
  OPENCLAW_TZ
# "openclaw:local" is the sentinel for a from-source build; anything else is
# treated as a registry image and pulled.
if [[ "$IMAGE_NAME" == "openclaw:local" ]]; then
  echo "==> Building Docker image: $IMAGE_NAME"
  docker build \
    --build-arg "OPENCLAW_DOCKER_APT_PACKAGES=${OPENCLAW_DOCKER_APT_PACKAGES}" \
    --build-arg "OPENCLAW_EXTENSIONS=${OPENCLAW_EXTENSIONS}" \
    --build-arg "OPENCLAW_INSTALL_DOCKER_CLI=${OPENCLAW_INSTALL_DOCKER_CLI:-}" \
    -t "$IMAGE_NAME" \
    -f "$ROOT_DIR/Dockerfile" \
    "$ROOT_DIR"
else
  echo "==> Pulling Docker image: $IMAGE_NAME"
  if ! docker pull "$IMAGE_NAME"; then
    echo "ERROR: Failed to pull image $IMAGE_NAME. Please check the image name and your access permissions." >&2
    exit 1
  fi
fi
# Ensure bind-mounted data directories are writable by the container's `node`
# user (uid 1000). Host-created dirs inherit the host user's uid which may
# differ, causing EACCES when the container tries to mkdir/write.
# Running a brief root container to chown is the portable Docker idiom --
# it works regardless of the host uid and doesn't require host-side root.
echo ""
echo "==> Fixing data-directory permissions"
# Use -xdev to restrict chown to the config-dir mount only — without it,
# the recursive chown would cross into the workspace bind mount and rewrite
# ownership of all user project files on Linux hosts.
# After fixing the config dir, only the OpenClaw metadata subdirectory
# (.openclaw/) inside the workspace gets chowned, not the user's project files.
docker compose "${COMPOSE_ARGS[@]}" run --rm --user root --entrypoint sh openclaw-cli -c \
  'find /home/node/.openclaw -xdev -exec chown node:node {} +; \
  [ -d /home/node/.openclaw/workspace/.openclaw ] && chown -R node:node /home/node/.openclaw/workspace/.openclaw || true'
echo ""
echo "==> Onboarding (interactive)"
# Print the fixed choices the wizard will assume before handing control to it.
echo "Docker setup pins Gateway mode to local."
echo "Gateway runtime bind comes from OPENCLAW_GATEWAY_BIND (default: lan)."
echo "Current runtime bind: $OPENCLAW_GATEWAY_BIND"
echo "Gateway token: $OPENCLAW_GATEWAY_TOKEN"
echo "Tailscale exposure: Off (use host-level tailnet/Tailscale setup separately)."
echo "Install Gateway daemon: No (managed by Docker Compose)"
echo ""
# Interactive onboarding inside the CLI container; daemon install is skipped
# because Compose manages the gateway lifecycle.
docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli onboard --mode local --no-install-daemon
echo ""
echo "==> Docker gateway defaults"
sync_gateway_mode_and_bind
echo ""
echo "==> Control UI origin allowlist"
ensure_control_ui_allowed_origins
echo ""
# Provider login is optional; print copy-pasteable commands instead of running them.
echo "==> Provider setup (optional)"
echo "WhatsApp (QR):"
echo "  ${COMPOSE_HINT} run --rm openclaw-cli channels login"
echo "Telegram (bot token):"
echo "  ${COMPOSE_HINT} run --rm openclaw-cli channels add --channel telegram --token <token>"
echo "Discord (bot token):"
echo "  ${COMPOSE_HINT} run --rm openclaw-cli channels add --channel discord --token <token>"
echo "Docs: https://docs.openclaw.ai/channels"
echo ""
echo "==> Starting gateway"
docker compose "${COMPOSE_ARGS[@]}" up -d openclaw-gateway
# --- Sandbox setup (opt-in via OPENCLAW_SANDBOX=1) ---
# Three gated phases: (1) build sandbox image + verify Docker CLI in the
# gateway image, (2) mount the Docker socket via an overlay only after the
# prerequisites pass, (3) apply sandbox config, rolling back to a safe state
# if any config step fails.
if [[ -n "$SANDBOX_ENABLED" ]]; then
  echo ""
  echo "==> Sandbox setup"
  # Build sandbox image if Dockerfile.sandbox exists.
  if [[ -f "$ROOT_DIR/Dockerfile.sandbox" ]]; then
    echo "Building sandbox image: openclaw-sandbox:bookworm-slim"
    docker build \
      -t "openclaw-sandbox:bookworm-slim" \
      -f "$ROOT_DIR/Dockerfile.sandbox" \
      "$ROOT_DIR"
  else
    echo "WARNING: Dockerfile.sandbox not found in $ROOT_DIR" >&2
    echo " Sandbox config will be applied but no sandbox image will be built." >&2
    echo " Agent exec may fail if the configured sandbox image does not exist." >&2
  fi
  # Defense-in-depth: verify Docker CLI in the running image before enabling
  # sandbox. This avoids claiming sandbox is enabled when the image cannot
  # launch sandbox containers.
  if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --entrypoint docker openclaw-gateway --version >/dev/null 2>&1; then
    echo "WARNING: Docker CLI not found inside the container image." >&2
    echo " Sandbox requires Docker CLI. Rebuild with --build-arg OPENCLAW_INSTALL_DOCKER_CLI=1" >&2
    echo " or use a local build (OPENCLAW_IMAGE=openclaw:local). Skipping sandbox setup." >&2
    SANDBOX_ENABLED=""
  fi
fi
# Apply sandbox config only if prerequisites are met.
if [[ -n "$SANDBOX_ENABLED" ]]; then
  # Mount Docker socket via a dedicated compose overlay. This overlay is
  # created only after sandbox prerequisites pass, so the socket is never
  # exposed when sandbox cannot actually run.
  if [[ -S "$DOCKER_SOCKET_PATH" ]]; then
    SANDBOX_COMPOSE_FILE="$ROOT_DIR/docker-compose.sandbox.yml"
    # Unquoted heredoc: socket path is expanded into the overlay.
    cat >"$SANDBOX_COMPOSE_FILE" <<YAML
services:
  openclaw-gateway:
    volumes:
      - ${DOCKER_SOCKET_PATH}:/var/run/docker.sock
YAML
    # Add the host docker group id so the in-container user can use the socket.
    if [[ -n "${DOCKER_GID:-}" ]]; then
      cat >>"$SANDBOX_COMPOSE_FILE" <<YAML
    group_add:
      - "${DOCKER_GID}"
YAML
    fi
    COMPOSE_ARGS+=("-f" "$SANDBOX_COMPOSE_FILE")
    echo "==> Sandbox: added Docker socket mount"
  else
    echo "WARNING: OPENCLAW_SANDBOX enabled but Docker socket not found at $DOCKER_SOCKET_PATH." >&2
    echo " Sandbox requires Docker socket access. Skipping sandbox setup." >&2
    SANDBOX_ENABLED=""
  fi
fi
if [[ -n "$SANDBOX_ENABLED" ]]; then
  # Enable sandbox in OpenClaw config. Track partial failure so we never
  # leave the socket mounted with an incomplete sandbox policy.
  sandbox_config_ok=true
  if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
    config set agents.defaults.sandbox.mode "non-main" >/dev/null; then
    echo "WARNING: Failed to set agents.defaults.sandbox.mode" >&2
    sandbox_config_ok=false
  fi
  if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
    config set agents.defaults.sandbox.scope "agent" >/dev/null; then
    echo "WARNING: Failed to set agents.defaults.sandbox.scope" >&2
    sandbox_config_ok=false
  fi
  if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
    config set agents.defaults.sandbox.workspaceAccess "none" >/dev/null; then
    echo "WARNING: Failed to set agents.defaults.sandbox.workspaceAccess" >&2
    sandbox_config_ok=false
  fi
  if [[ "$sandbox_config_ok" == true ]]; then
    echo "Sandbox enabled: mode=non-main, scope=agent, workspaceAccess=none"
    echo "Docs: https://docs.openclaw.ai/gateway/sandboxing"
    # Restart gateway with sandbox compose overlay to pick up socket mount + config.
    docker compose "${COMPOSE_ARGS[@]}" up -d openclaw-gateway
  else
    echo "WARNING: Sandbox config was partially applied. Check errors above." >&2
    echo " Skipping gateway restart to avoid exposing Docker socket without a full sandbox policy." >&2
    # Roll back using BASE_COMPOSE_ARGS (no sandbox overlay) so the rollback
    # itself never requires the socket mount.
    if ! docker compose "${BASE_COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
      config set agents.defaults.sandbox.mode "off" >/dev/null; then
      echo "WARNING: Failed to roll back agents.defaults.sandbox.mode to off" >&2
    else
      echo "Sandbox mode rolled back to off due to partial sandbox config failure."
    fi
    if [[ -n "${SANDBOX_COMPOSE_FILE:-}" ]]; then
      rm -f "$SANDBOX_COMPOSE_FILE"
    fi
    # Ensure gateway service definition is reset without sandbox overlay mount.
    docker compose "${BASE_COMPOSE_ARGS[@]}" up -d --force-recreate openclaw-gateway
  fi
else
  # Keep reruns deterministic: if sandbox is not active for this run, reset
  # persisted sandbox mode so future execs do not require docker.sock by stale
  # config alone.
  if ! docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
    config set agents.defaults.sandbox.mode "off" >/dev/null; then
    echo "WARNING: Failed to reset agents.defaults.sandbox.mode to off" >&2
  fi
  if [[ -f "$ROOT_DIR/docker-compose.sandbox.yml" ]]; then
    rm -f "$ROOT_DIR/docker-compose.sandbox.yml"
  fi
fi
# Final summary: where the gateway lives, where state is stored, and the
# compose commands the user will need next.
echo ""
echo "Gateway running with host port mapping."
echo "Access from tailnet devices via the host's tailnet IP."
echo "Config: $OPENCLAW_CONFIG_DIR"
echo "Workspace: $OPENCLAW_WORKSPACE_DIR"
echo "Token: $OPENCLAW_GATEWAY_TOKEN"
echo ""
echo "Commands:"
echo "  ${COMPOSE_HINT} logs -f openclaw-gateway"
echo "  ${COMPOSE_HINT} exec openclaw-gateway node dist/index.js health --token \"$OPENCLAW_GATEWAY_TOKEN\""
# Replace this process by re-invoking $SCRIPT_PATH with the original args.
# NOTE(review): SCRIPT_PATH is defined earlier (outside this view) — confirm
# this re-exec target is intentional and cannot loop back into this setup path.
exec "$SCRIPT_PATH" "$@"

View File

@ -22101,6 +22101,34 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.ackReaction",
"kind": "channel",
"type": "string",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.ackReactionScope",
"kind": "channel",
"type": "string",
"required": false,
"enumValues": [
"group-mentions",
"group-all",
"direct",
"all",
"none",
"off"
],
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.actions",
"kind": "channel",
@ -22151,6 +22179,16 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.actions.profile",
"kind": "channel",
"type": "boolean",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.actions.reactions",
"kind": "channel",
@ -22161,6 +22199,16 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.actions.verification",
"kind": "channel",
"type": "boolean",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.allowlistOnly",
"kind": "channel",
@ -22209,6 +22257,16 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.avatarUrl",
"kind": "channel",
"type": "string",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.chunkMode",
"kind": "channel",
@ -22233,6 +22291,16 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.deviceId",
"kind": "channel",
"type": "string",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.deviceName",
"kind": "channel",
@ -22651,6 +22719,20 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.reactionNotifications",
"kind": "channel",
"type": "string",
"required": false,
"enumValues": [
"off",
"own"
],
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.replyToMode",
"kind": "channel",
@ -22859,6 +22941,30 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.startupVerification",
"kind": "channel",
"type": "string",
"required": false,
"enumValues": [
"off",
"if-unverified"
],
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.startupVerificationCooldownHours",
"kind": "channel",
"type": "number",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.textChunkLimit",
"kind": "channel",
@ -22869,6 +22975,66 @@
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.threadBindings",
"kind": "channel",
"type": "object",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": true
},
{
"path": "channels.matrix.threadBindings.enabled",
"kind": "channel",
"type": "boolean",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.threadBindings.idleHours",
"kind": "channel",
"type": "number",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.threadBindings.maxAgeHours",
"kind": "channel",
"type": "number",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.threadBindings.spawnAcpSessions",
"kind": "channel",
"type": "boolean",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.threadBindings.spawnSubagentSessions",
"kind": "channel",
"type": "boolean",
"required": false,
"deprecated": false,
"sensitive": false,
"tags": [],
"hasChildren": false
},
{
"path": "channels.matrix.threadReplies",
"kind": "channel",

View File

@ -1,4 +1,4 @@
{"generatedBy":"scripts/generate-config-doc-baseline.ts","recordType":"meta","totalPaths":5518}
{"generatedBy":"scripts/generate-config-doc-baseline.ts","recordType":"meta","totalPaths":5533}
{"recordType":"path","path":"acp","kind":"core","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":["advanced"],"label":"ACP","help":"ACP runtime controls for enabling dispatch, selecting backends, constraining allowed agent targets, and tuning streamed turn projection behavior.","hasChildren":true}
{"recordType":"path","path":"acp.allowedAgents","kind":"core","type":"array","required":false,"deprecated":false,"sensitive":false,"tags":["access"],"label":"ACP Allowed Agents","help":"Allowlist of ACP target agent ids permitted for ACP runtime sessions. Empty means no additional allowlist restriction.","hasChildren":true}
{"recordType":"path","path":"acp.allowedAgents.*","kind":"core","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
@ -1984,18 +1984,24 @@
{"recordType":"path","path":"channels.matrix.accessToken","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.accounts","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
{"recordType":"path","path":"channels.matrix.accounts.*","kind":"channel","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.ackReaction","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.ackReactionScope","kind":"channel","type":"string","required":false,"enumValues":["group-mentions","group-all","direct","all","none","off"],"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.actions","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
{"recordType":"path","path":"channels.matrix.actions.channelInfo","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.actions.memberInfo","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.actions.messages","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.actions.pins","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.actions.profile","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.actions.reactions","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.actions.verification","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.allowlistOnly","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.autoJoin","kind":"channel","type":"string","required":false,"enumValues":["always","allowlist","off"],"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.autoJoinAllowlist","kind":"channel","type":"array","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
{"recordType":"path","path":"channels.matrix.autoJoinAllowlist.*","kind":"channel","type":["number","string"],"required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.avatarUrl","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.chunkMode","kind":"channel","type":"string","required":false,"enumValues":["length","newline"],"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.defaultAccount","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.deviceId","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.deviceName","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.dm","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
{"recordType":"path","path":"channels.matrix.dm.allowFrom","kind":"channel","type":"array","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
@ -2035,6 +2041,7 @@
{"recordType":"path","path":"channels.matrix.password.id","kind":"channel","type":"string","required":true,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.password.provider","kind":"channel","type":"string","required":true,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.password.source","kind":"channel","type":"string","required":true,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.reactionNotifications","kind":"channel","type":"string","required":false,"enumValues":["off","own"],"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.replyToMode","kind":"channel","type":"string","required":false,"enumValues":["off","first","all"],"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.responsePrefix","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.rooms","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
@ -2055,7 +2062,15 @@
{"recordType":"path","path":"channels.matrix.rooms.*.tools.deny.*","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.rooms.*.users","kind":"channel","type":"array","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
{"recordType":"path","path":"channels.matrix.rooms.*.users.*","kind":"channel","type":["number","string"],"required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.startupVerification","kind":"channel","type":"string","required":false,"enumValues":["off","if-unverified"],"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.startupVerificationCooldownHours","kind":"channel","type":"number","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.textChunkLimit","kind":"channel","type":"number","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.threadBindings","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
{"recordType":"path","path":"channels.matrix.threadBindings.enabled","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.threadBindings.idleHours","kind":"channel","type":"number","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.threadBindings.maxAgeHours","kind":"channel","type":"number","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.threadBindings.spawnAcpSessions","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.threadBindings.spawnSubagentSessions","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.threadReplies","kind":"channel","type":"string","required":false,"enumValues":["off","inbound","always"],"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.matrix.userId","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.mattermost","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":["channels","network"],"label":"Mattermost","help":"self-hosted Slack-style chat; install the plugin to enable.","hasChildren":true}

View File

@ -1,6 +1,5 @@
---
title: IRC
description: Connect OpenClaw to IRC channels and direct messages.
summary: "IRC plugin setup, access controls, and troubleshooting"
read_when:
- You want to connect OpenClaw to IRC channels or DMs

View File

@ -67,7 +67,7 @@ If you use the `device-pair` plugin, you can do first-time device pairing entire
2. The bot replies with two messages: an instruction message and a separate **setup code** message (easy to copy/paste in Telegram).
3. On your phone, open the OpenClaw iOS app → Settings → Gateway.
4. Paste the setup code and connect.
5. Back in Telegram: `/pair approve`
5. Back in Telegram: `/pair pending` (review request IDs, role, and scopes), then approve.
The setup code is a base64-encoded JSON payload that contains:
@ -84,6 +84,10 @@ openclaw devices approve <requestId>
openclaw devices reject <requestId>
```
If the same device retries with different auth details (for example different
role/scopes/public key), the previous pending request is superseded and a new
`requestId` is created.
### Node pairing state storage
Stored under `~/.openclaw/devices/`:

View File

@ -346,7 +346,13 @@ curl "https://api.telegram.org/bot<bot_token>/getUpdates"
1. `/pair` generates setup code
2. paste code in iOS app
3. `/pair approve` approves latest pending request
3. `/pair pending` lists pending requests (including role/scopes)
4. approve the request:
- `/pair approve <requestId>` for explicit approval
- `/pair approve` when there is only one pending request
- `/pair approve latest` for most recent
If a device retries with changed auth details (for example role/scopes/public key), the previous pending request is superseded and the new request uses a different `requestId`. Re-run `/pair pending` before approving.
More details: [Pairing](/channels/pairing#pair-via-telegram-recommended-for-ios).

View File

@ -1,6 +1,5 @@
---
title: CI Pipeline
description: How the OpenClaw CI pipeline works
summary: "CI job graph, scope gates, and local command equivalents"
read_when:
- You need to understand why a CI job did or did not run

View File

@ -21,6 +21,9 @@ openclaw devices list
openclaw devices list --json
```
Pending request output includes the requested role and scopes so approvals can
be reviewed before you approve.
### `openclaw devices remove <deviceId>`
Remove one paired device entry.
@ -45,6 +48,11 @@ openclaw devices clear --yes --pending --json
Approve a pending device pairing request. If `requestId` is omitted, OpenClaw
automatically approves the most recent pending request.
Note: if a device retries pairing with changed auth details (role/scopes/public
key), OpenClaw supersedes the previous pending entry and issues a new
`requestId`. Run `openclaw devices list` right before approval to use the
current ID.
```
openclaw devices approve
openclaw devices approve <requestId>

View File

@ -111,6 +111,10 @@ openclaw devices list
openclaw devices approve <requestId>
```
If the node retries pairing with changed auth details (role/scopes/public key),
the previous pending request is superseded and a new `requestId` is created.
Run `openclaw devices list` again before approval.
The node host stores its node id, token, display name, and gateway connection info in
`~/.openclaw/node.json`.

View File

@ -52,6 +52,18 @@
]
},
"redirects": [
{
"source": "/platforms/oracle",
"destination": "/install/oracle"
},
{
"source": "/platforms/digitalocean",
"destination": "/install/digitalocean"
},
{
"source": "/platforms/raspberry-pi",
"destination": "/install/raspberry-pi"
},
{
"source": "/brave-search",
"destination": "/tools/brave-search"
@ -885,17 +897,20 @@
"group": "Hosting",
"pages": [
"install/azure",
"install/digitalocean",
"install/docker-vm-runtime",
"install/exe-dev",
"install/fly",
"install/gcp",
"install/hetzner",
"install/kubernetes",
"vps",
"install/macos-vm",
"install/northflank",
"install/oracle",
"install/railway",
"install/render",
"vps"
"install/raspberry-pi",
"install/render"
]
},
{
@ -1164,10 +1179,7 @@
"platforms/linux",
"platforms/windows",
"platforms/android",
"platforms/ios",
"platforms/digitalocean",
"platforms/oracle",
"platforms/raspberry-pi"
"platforms/ios"
]
},
{

View File

@ -1,6 +1,5 @@
---
title: "Configuration Reference"
description: "Complete field-by-field reference for ~/.openclaw/openclaw.json"
summary: "Complete reference for every OpenClaw config key, defaults, and channel settings"
read_when:
- You need exact field-level config semantics or defaults

View File

@ -399,7 +399,7 @@ Security defaults:
Docker installs and the containerized gateway live here:
[Docker](/install/docker)
For Docker gateway deployments, `docker-setup.sh` can bootstrap sandbox config.
For Docker gateway deployments, `scripts/docker/setup.sh` can bootstrap sandbox config.
Set `OPENCLAW_SANDBOX=1` (or `true`/`yes`/`on`) to enable that path. You can
override socket location with `OPENCLAW_DOCKER_SOCKET`. Full setup and env
reference: [Docker](/install/docker#enable-agent-sandbox-for-docker-gateway-opt-in).

View File

@ -1,4 +1,5 @@
---
title: "Trusted Proxy Auth"
summary: "Delegate gateway authentication to a trusted reverse proxy (Pomerium, Caddy, nginx + OAuth)"
read_when:
- Running OpenClaw behind an identity-aware proxy

File diff suppressed because it is too large Load Diff

View File

@ -19,3 +19,10 @@ If you want a quick “get unstuck” flow, start here:
If you're looking for conceptual questions (not “something broke”):
- [FAQ (concepts)](/help/faq)
## Environment and debugging
- **Environment variables:** [Where OpenClaw loads env vars and precedence](/help/environment)
- **Debugging:** [Watch mode, raw streams, and dev profile](/help/debugging)
- **Testing:** [Test suites, live tests, and Docker runners](/help/testing)
- **Scripts:** [Repository helper scripts](/help/scripts)

View File

@ -0,0 +1,129 @@
---
summary: "Host OpenClaw on a DigitalOcean Droplet"
read_when:
- Setting up OpenClaw on DigitalOcean
- Looking for a simple paid VPS for OpenClaw
title: "DigitalOcean"
---
# DigitalOcean
Run a persistent OpenClaw Gateway on a DigitalOcean Droplet.
## Prerequisites
- DigitalOcean account ([signup](https://cloud.digitalocean.com/registrations/new))
- SSH key pair (or willingness to use password auth)
- About 20 minutes
## Setup
<Steps>
<Step title="Create a Droplet">
<Warning>
Use a clean base image (Ubuntu 24.04 LTS). Avoid third-party Marketplace 1-click images unless you have reviewed their startup scripts and firewall defaults.
</Warning>
1. Log into [DigitalOcean](https://cloud.digitalocean.com/).
2. Click **Create > Droplets**.
3. Choose:
- **Region:** Closest to you
- **Image:** Ubuntu 24.04 LTS
- **Size:** Basic, Regular, 1 vCPU / 1 GB RAM / 25 GB SSD
- **Authentication:** SSH key (recommended) or password
4. Click **Create Droplet** and note the IP address.
</Step>
<Step title="Connect and install">
```bash
ssh root@YOUR_DROPLET_IP
apt update && apt upgrade -y
# Install Node.js 24
curl -fsSL https://deb.nodesource.com/setup_24.x | bash -
apt install -y nodejs
# Install OpenClaw
curl -fsSL https://openclaw.ai/install.sh | bash
openclaw --version
```
</Step>
<Step title="Run onboarding">
```bash
openclaw onboard --install-daemon
```
The wizard walks you through model auth, channel setup, gateway token generation, and daemon installation (systemd).
</Step>
<Step title="Add swap (recommended for 1 GB Droplets)">
```bash
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' >> /etc/fstab
```
</Step>
<Step title="Verify the gateway">
```bash
openclaw status
systemctl --user status openclaw-gateway.service
journalctl --user -u openclaw-gateway.service -f
```
</Step>
<Step title="Access the Control UI">
The gateway binds to loopback by default. Pick one of these options.
**Option A: SSH tunnel (simplest)**
```bash
# From your local machine
ssh -L 18789:localhost:18789 root@YOUR_DROPLET_IP
```
Then open `http://localhost:18789`.
**Option B: Tailscale Serve**
```bash
curl -fsSL https://tailscale.com/install.sh | sh
tailscale up
openclaw config set gateway.tailscale.mode serve
openclaw gateway restart
```
Then open `https://<magicdns>/` from any device on your tailnet.
**Option C: Tailnet bind (no Serve)**
```bash
openclaw config set gateway.bind tailnet
openclaw gateway restart
```
Then open `http://<tailscale-ip>:18789` (token required).
</Step>
</Steps>
## Troubleshooting
**Gateway will not start** -- Run `openclaw doctor --non-interactive` and check logs with `journalctl --user -u openclaw-gateway.service -n 50`.
**Port already in use** -- Run `lsof -i :18789` to find the process, then stop it.
**Out of memory** -- Verify swap is active with `free -h`. If still hitting OOM, use API-based models (Claude, GPT) rather than local models, or upgrade to a 2 GB Droplet.
## Next steps
- [Channels](/channels) -- connect Telegram, WhatsApp, Discord, and more
- [Gateway configuration](/gateway/configuration) -- all config options
- [Updating](/install/updating) -- keep OpenClaw up to date

View File

@ -32,14 +32,14 @@ Docker is **optional**. Use it only if you want a containerized gateway or to va
From the repo root, run the setup script:
```bash
./docker-setup.sh
./scripts/docker/setup.sh
```
This builds the gateway image locally. To use a pre-built image instead:
```bash
export OPENCLAW_IMAGE="ghcr.io/openclaw/openclaw:latest"
./docker-setup.sh
./scripts/docker/setup.sh
```
Pre-built images are published at the
@ -139,7 +139,7 @@ docker compose exec openclaw-gateway node dist/index.js health --token "$OPENCLA
### LAN vs loopback
`docker-setup.sh` defaults `OPENCLAW_GATEWAY_BIND=lan` so host access to
`scripts/docker/setup.sh` defaults `OPENCLAW_GATEWAY_BIND=lan` so host access to
`http://127.0.0.1:18789` works with Docker port publishing.
- `lan` (default): host browser and host CLI can reach the published gateway port.
@ -180,7 +180,7 @@ See the [`ClawDock` Helper README](https://github.com/openclaw/openclaw/blob/mai
<Accordion title="Enable agent sandbox for Docker gateway">
```bash
export OPENCLAW_SANDBOX=1
./docker-setup.sh
./scripts/docker/setup.sh
```
Custom socket path (e.g. rootless Docker):
@ -188,7 +188,7 @@ See the [`ClawDock` Helper README](https://github.com/openclaw/openclaw/blob/mai
```bash
export OPENCLAW_SANDBOX=1
export OPENCLAW_DOCKER_SOCKET=/run/user/1000/docker.sock
./docker-setup.sh
./scripts/docker/setup.sh
```
The script mounts `docker.sock` only after sandbox prerequisites pass. If

View File

@ -1,6 +1,5 @@
---
title: Fly.io
description: Deploy OpenClaw on Fly.io
summary: "Step-by-step Fly.io deployment for OpenClaw with persistent storage and HTTPS"
read_when:
- Deploying OpenClaw on Fly.io

156
docs/install/oracle.md Normal file
View File

@ -0,0 +1,156 @@
---
summary: "Host OpenClaw on Oracle Cloud's Always Free ARM tier"
read_when:
- Setting up OpenClaw on Oracle Cloud
- Looking for free VPS hosting for OpenClaw
- Want 24/7 OpenClaw on a small server
title: "Oracle Cloud"
---
# Oracle Cloud
Run a persistent OpenClaw Gateway on Oracle Cloud's **Always Free** ARM tier (up to 4 OCPU, 24 GB RAM, 200 GB storage) at no cost.
## Prerequisites
- Oracle Cloud account ([signup](https://www.oracle.com/cloud/free/)) -- see [community signup guide](https://gist.github.com/rssnyder/51e3cfedd730e7dd5f4a816143b25dbd) if you hit issues
- Tailscale account (free at [tailscale.com](https://tailscale.com))
- An SSH key pair
- About 30 minutes
## Setup
<Steps>
<Step title="Create an OCI instance">
1. Log into [Oracle Cloud Console](https://cloud.oracle.com/).
2. Navigate to **Compute > Instances > Create Instance**.
3. Configure:
- **Name:** `openclaw`
- **Image:** Ubuntu 24.04 (aarch64)
- **Shape:** `VM.Standard.A1.Flex` (Ampere ARM)
- **OCPUs:** 2 (or up to 4)
- **Memory:** 12 GB (or up to 24 GB)
- **Boot volume:** 50 GB (up to 200 GB free)
- **SSH key:** Add your public key
4. Click **Create** and note the public IP address.
<Tip>
If instance creation fails with "Out of capacity", try a different availability domain or retry later. Free tier capacity is limited.
</Tip>
</Step>
<Step title="Connect and update the system">
```bash
ssh ubuntu@YOUR_PUBLIC_IP
sudo apt update && sudo apt upgrade -y
sudo apt install -y build-essential
```
`build-essential` is required for ARM compilation of some dependencies.
</Step>
<Step title="Configure user and hostname">
```bash
sudo hostnamectl set-hostname openclaw
sudo passwd ubuntu
sudo loginctl enable-linger ubuntu
```
Enabling linger keeps user services running after logout.
</Step>
<Step title="Install Tailscale">
```bash
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up --ssh --hostname=openclaw
```
From now on, connect via Tailscale: `ssh ubuntu@openclaw`.
</Step>
<Step title="Install OpenClaw">
```bash
curl -fsSL https://openclaw.ai/install.sh | bash
source ~/.bashrc
```
When prompted "How do you want to hatch your bot?", select **Do this later**.
</Step>
<Step title="Configure the gateway">
Use token auth with Tailscale Serve for secure remote access.
```bash
openclaw config set gateway.bind loopback
openclaw config set gateway.auth.mode token
openclaw doctor --generate-gateway-token
openclaw config set gateway.tailscale.mode serve
openclaw config set gateway.trustedProxies '["127.0.0.1"]'
systemctl --user restart openclaw-gateway
```
</Step>
<Step title="Lock down VCN security">
Block all traffic except Tailscale at the network edge:
1. Go to **Networking > Virtual Cloud Networks** in the OCI Console.
2. Click your VCN, then **Security Lists > Default Security List**.
3. **Remove** all ingress rules except `0.0.0.0/0 UDP 41641` (Tailscale).
4. Keep default egress rules (allow all outbound).
This blocks SSH on port 22, HTTP, HTTPS, and everything else at the network edge. You can only connect via Tailscale from this point on.
</Step>
<Step title="Verify">
```bash
openclaw --version
systemctl --user status openclaw-gateway
tailscale serve status
curl http://localhost:18789
```
Access the Control UI from any device on your tailnet:
```
https://openclaw.<tailnet-name>.ts.net/
```
Replace `<tailnet-name>` with your tailnet name (visible in `tailscale status`).
</Step>
</Steps>
## Fallback: SSH tunnel
If Tailscale Serve is not working, use an SSH tunnel from your local machine:
```bash
ssh -L 18789:127.0.0.1:18789 ubuntu@openclaw
```
Then open `http://localhost:18789`.
## Troubleshooting
**Instance creation fails ("Out of capacity")** -- Free tier ARM instances are popular. Try a different availability domain or retry during off-peak hours.
**Tailscale will not connect** -- Run `sudo tailscale up --ssh --hostname=openclaw --reset` to re-authenticate.
**Gateway will not start** -- Run `openclaw doctor --non-interactive` and check logs with `journalctl --user -u openclaw-gateway -n 50`.
**ARM binary issues** -- Most npm packages work on ARM64. For native binaries, look for `linux-arm64` or `aarch64` releases. Verify architecture with `uname -m`.
## Next steps
- [Channels](/channels) -- connect Telegram, WhatsApp, Discord, and more
- [Gateway configuration](/gateway/configuration) -- all config options
- [Updating](/install/updating) -- keep OpenClaw up to date

View File

@ -21,7 +21,7 @@ Run the OpenClaw Gateway in a **rootless** Podman container. Uses the same image
From the repo root, run the setup script. It creates a dedicated `openclaw` user, builds the container image, and installs the launch script:
```bash
./setup-podman.sh
./scripts/podman/setup.sh
```
This also creates a minimal config at `~openclaw/.openclaw/openclaw.json` (sets `gateway.mode` to `"local"`) so the Gateway can start without running the wizard.
@ -29,12 +29,12 @@ Run the OpenClaw Gateway in a **rootless** Podman container. Uses the same image
By default the container is **not** installed as a systemd service -- you start it manually in the next step. For a production-style setup with auto-start and restarts, pass `--quadlet` instead:
```bash
./setup-podman.sh --quadlet
./scripts/podman/setup.sh --quadlet
```
(Or set `OPENCLAW_PODMAN_QUADLET=1`. Use `--container` to install only the container and launch script.)
**Optional build-time env vars** (set before running `setup-podman.sh`):
**Optional build-time env vars** (set before running `scripts/podman/setup.sh`):
- `OPENCLAW_DOCKER_APT_PACKAGES` -- install extra apt packages during image build.
- `OPENCLAW_EXTENSIONS` -- pre-install extension dependencies (space-separated names, e.g. `diagnostics-otel matrix`).
@ -64,7 +64,7 @@ Run the OpenClaw Gateway in a **rootless** Podman container. Uses the same image
## Systemd (Quadlet, optional)
If you ran `./setup-podman.sh --quadlet` (or `OPENCLAW_PODMAN_QUADLET=1`), a [Podman Quadlet](https://docs.podman.io/en/latest/markdown/podman-systemd.unit.5.html) unit is installed so the gateway runs as a systemd user service for the openclaw user. The service is enabled and started at the end of setup.
If you ran `./scripts/podman/setup.sh --quadlet` (or `OPENCLAW_PODMAN_QUADLET=1`), a [Podman Quadlet](https://docs.podman.io/en/latest/markdown/podman-systemd.unit.5.html) unit is installed so the gateway runs as a systemd user service for the openclaw user. The service is enabled and started at the end of setup.
- **Start:** `sudo systemctl --machine openclaw@ --user start openclaw.service`
- **Stop:** `sudo systemctl --machine openclaw@ --user stop openclaw.service`
@ -73,11 +73,11 @@ If you ran `./setup-podman.sh --quadlet` (or `OPENCLAW_PODMAN_QUADLET=1`), a [Po
The quadlet file lives at `~openclaw/.config/containers/systemd/openclaw.container`. To change ports or env, edit that file (or the `.env` it sources), then `sudo systemctl --machine openclaw@ --user daemon-reload` and restart the service. On boot, the service starts automatically if lingering is enabled for openclaw (setup does this when loginctl is available).
To add quadlet **after** an initial setup that did not use it, re-run: `./setup-podman.sh --quadlet`.
To add quadlet **after** an initial setup that did not use it, re-run: `./scripts/podman/setup.sh --quadlet`.
## The openclaw user (non-login)
`setup-podman.sh` creates a dedicated system user `openclaw`:
`scripts/podman/setup.sh` creates a dedicated system user `openclaw`:
- **Shell:** `nologin` — no interactive login; reduces attack surface.
- **Home:** e.g. `/home/openclaw` — holds `~/.openclaw` (config, workspace) and the launch script `run-openclaw-podman.sh`.
@ -98,7 +98,7 @@ To add quadlet **after** an initial setup that did not use it, re-run: `./setup-
## Environment and config
- **Token:** Stored in `~openclaw/.openclaw/.env` as `OPENCLAW_GATEWAY_TOKEN`. `setup-podman.sh` and `run-openclaw-podman.sh` generate it if missing (uses `openssl`, `python3`, or `od`).
- **Token:** Stored in `~openclaw/.openclaw/.env` as `OPENCLAW_GATEWAY_TOKEN`. `scripts/podman/setup.sh` and `run-openclaw-podman.sh` generate it if missing (uses `openssl`, `python3`, or `od`).
- **Optional:** In that `.env` you can set provider keys (e.g. `GROQ_API_KEY`, `OLLAMA_API_KEY`) and other OpenClaw env vars.
- **Host ports:** By default the script maps `18789` (gateway) and `18790` (bridge). Override the **host** port mapping with `OPENCLAW_PODMAN_GATEWAY_HOST_PORT` and `OPENCLAW_PODMAN_BRIDGE_HOST_PORT` when launching.
- **Gateway bind:** By default, `run-openclaw-podman.sh` starts the gateway with `--bind loopback` for safe local access. To expose on LAN, set `OPENCLAW_GATEWAY_BIND=lan` and configure `gateway.controlUi.allowedOrigins` (or explicitly enable host-header fallback) in `openclaw.json`.
@ -110,7 +110,7 @@ To add quadlet **after** an initial setup that did not use it, re-run: `./setup-
- **Ephemeral sandbox tmpfs:** if you enable `agents.defaults.sandbox`, the tool sandbox containers mount `tmpfs` at `/tmp`, `/var/tmp`, and `/run`. Those paths are memory-backed and disappear with the sandbox container; the top-level Podman container setup does not add its own tmpfs mounts.
- **Disk growth hotspots:** the main paths to watch are `media/`, `agents/<agentId>/sessions/sessions.json`, transcript JSONL files, `cron/runs/*.jsonl`, and rolling file logs under `/tmp/openclaw/` (or your configured `logging.file`).
`setup-podman.sh` now stages the image tar in a private temp directory and prints the chosen base dir during setup. For non-root runs it accepts `TMPDIR` only when that base is safe to use; otherwise it falls back to `/var/tmp`, then `/tmp`. The saved tar stays owner-only and is streamed into the target users `podman load`, so private caller temp dirs do not block setup.
`scripts/podman/setup.sh` now stages the image tar in a private temp directory and prints the chosen base dir during setup. For non-root runs it accepts `TMPDIR` only when that base is safe to use; otherwise it falls back to `/var/tmp`, then `/tmp`. The saved tar stays owner-only and is streamed into the target user's `podman load`, so private caller temp dirs do not block setup.
## Useful commands
@ -122,12 +122,12 @@ To add quadlet **after** an initial setup that did not use it, re-run: `./setup-
## Troubleshooting
- **Permission denied (EACCES) on config or auth-profiles:** The container defaults to `--userns=keep-id` and runs as the same uid/gid as the host user running the script. Ensure your host `OPENCLAW_CONFIG_DIR` and `OPENCLAW_WORKSPACE_DIR` are owned by that user.
- **Gateway start blocked (missing `gateway.mode=local`):** Ensure `~openclaw/.openclaw/openclaw.json` exists and sets `gateway.mode="local"`. `setup-podman.sh` creates this file if missing.
- **Gateway start blocked (missing `gateway.mode=local`):** Ensure `~openclaw/.openclaw/openclaw.json` exists and sets `gateway.mode="local"`. `scripts/podman/setup.sh` creates this file if missing.
- **Rootless Podman fails for user openclaw:** Check `/etc/subuid` and `/etc/subgid` contain a line for `openclaw` (e.g. `openclaw:100000:65536`). Add it if missing and restart.
- **Container name in use:** The launch script uses `podman run --replace`, so the existing container is replaced when you start again. To clean up manually: `podman rm -f openclaw`.
- **Script not found when running as openclaw:** Ensure `setup-podman.sh` was run so that `run-openclaw-podman.sh` is copied to openclaws home (e.g. `/home/openclaw/run-openclaw-podman.sh`).
- **Script not found when running as openclaw:** Ensure `scripts/podman/setup.sh` was run so that `run-openclaw-podman.sh` is copied to openclaw's home (e.g. `/home/openclaw/run-openclaw-podman.sh`).
- **Quadlet service not found or fails to start:** Run `sudo systemctl --machine openclaw@ --user daemon-reload` after editing the `.container` file. Quadlet requires cgroups v2: `podman info --format '{{.Host.CgroupsVersion}}'` should show `2`.
## Optional: run as your own user
To run the gateway as your normal user (no dedicated openclaw user): build the image, create `~/.openclaw/.env` with `OPENCLAW_GATEWAY_TOKEN`, and run the container with `--userns=keep-id` and mounts to your `~/.openclaw`. The launch script is designed for the openclaw-user flow; for a single-user setup you can instead run the `podman run` command from the script manually, pointing config and workspace to your home. Recommended for most users: use `setup-podman.sh` and run as the openclaw user so config and process are isolated.
To run the gateway as your normal user (no dedicated openclaw user): build the image, create `~/.openclaw/.env` with `OPENCLAW_GATEWAY_TOKEN`, and run the container with `--userns=keep-id` and mounts to your `~/.openclaw`. The launch script is designed for the openclaw-user flow; for a single-user setup you can instead run the `podman run` command from the script manually, pointing config and workspace to your home. Recommended for most users: use `scripts/podman/setup.sh` and run as the openclaw user so config and process are isolated.

View File

@ -0,0 +1,159 @@
---
summary: "Host OpenClaw on a Raspberry Pi for always-on self-hosting"
read_when:
- Setting up OpenClaw on a Raspberry Pi
- Running OpenClaw on ARM devices
- Building a cheap always-on personal AI
title: "Raspberry Pi"
---
# Raspberry Pi
Run a persistent, always-on OpenClaw Gateway on a Raspberry Pi. Since the Pi is just the gateway (models run in the cloud via API), even a modest Pi handles the workload well.
## Prerequisites
- Raspberry Pi 4 or 5 with 2 GB+ RAM (4 GB recommended)
- MicroSD card (16 GB+) or USB SSD (better performance)
- Official Pi power supply
- Network connection (Ethernet or WiFi)
- 64-bit Raspberry Pi OS (required -- do not use 32-bit)
- About 30 minutes
## Setup
<Steps>
<Step title="Flash the OS">
Use **Raspberry Pi OS Lite (64-bit)** -- no desktop needed for a headless server.
1. Download [Raspberry Pi Imager](https://www.raspberrypi.com/software/).
2. Choose OS: **Raspberry Pi OS Lite (64-bit)**.
3. In the settings dialog, pre-configure:
- Hostname: `gateway-host`
- Enable SSH
- Set username and password
- Configure WiFi (if not using Ethernet)
4. Flash to your SD card or USB drive, insert it, and boot the Pi.
</Step>
<Step title="Connect via SSH">
```bash
ssh user@gateway-host
```
</Step>
<Step title="Update the system">
```bash
sudo apt update && sudo apt upgrade -y
sudo apt install -y git curl build-essential
# Set timezone (important for cron and reminders)
sudo timedatectl set-timezone America/Chicago
```
</Step>
<Step title="Install Node.js 24">
```bash
curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash -
sudo apt install -y nodejs
node --version
```
</Step>
<Step title="Add swap (important for 2 GB or less)">
```bash
sudo fallocate -l 2G /swapfile
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
# Reduce swappiness for low-RAM devices
echo 'vm.swappiness=10' | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
```
</Step>
<Step title="Install OpenClaw">
```bash
curl -fsSL https://openclaw.ai/install.sh | bash
```
</Step>
<Step title="Run onboarding">
```bash
openclaw onboard --install-daemon
```
Follow the wizard. API keys are recommended over OAuth for headless devices. Telegram is the easiest channel to start with.
</Step>
<Step title="Verify">
```bash
openclaw status
sudo systemctl status openclaw
journalctl -u openclaw -f
```
</Step>
<Step title="Access the Control UI">
On your computer, get a dashboard URL from the Pi:
```bash
ssh user@gateway-host 'openclaw dashboard --no-open'
```
Then create an SSH tunnel in another terminal:
```bash
ssh -N -L 18789:127.0.0.1:18789 user@gateway-host
```
Open the printed URL in your local browser. For always-on remote access, see [Tailscale integration](/gateway/tailscale).
</Step>
</Steps>
## Performance tips
**Use a USB SSD** -- SD cards are slow and wear out. A USB SSD dramatically improves performance. See the [Pi USB boot guide](https://www.raspberrypi.com/documentation/computers/raspberry-pi.html#usb-mass-storage-boot).
**Enable module compile cache** -- Speeds up repeated CLI invocations on lower-power Pi hosts:
```bash
grep -q 'NODE_COMPILE_CACHE=/var/tmp/openclaw-compile-cache' ~/.bashrc || cat >> ~/.bashrc <<'EOF' # pragma: allowlist secret
export NODE_COMPILE_CACHE=/var/tmp/openclaw-compile-cache
mkdir -p /var/tmp/openclaw-compile-cache
export OPENCLAW_NO_RESPAWN=1
EOF
source ~/.bashrc
```
**Reduce memory usage** -- For headless setups, free GPU memory and disable unused services:
```bash
echo 'gpu_mem=16' | sudo tee -a /boot/config.txt
sudo systemctl disable bluetooth
```
## Troubleshooting
**Out of memory** -- Verify swap is active with `free -h`. Disable unused services (`sudo systemctl disable cups bluetooth avahi-daemon`). Use API-based models only.
**Slow performance** -- Use a USB SSD instead of an SD card. Check for CPU throttling with `vcgencmd get_throttled` (should return `0x0`).
**Service will not start** -- Check logs with `journalctl -u openclaw --no-pager -n 100` and run `openclaw doctor --non-interactive`.
**ARM binary issues** -- If a skill fails with "exec format error", check whether the binary has an ARM64 build. Verify architecture with `uname -m` (should show `aarch64`).
**WiFi drops** -- Disable WiFi power management: `sudo iwconfig wlan0 power off`.
## Next steps
- [Channels](/channels) -- connect Telegram, WhatsApp, Discord, and more
- [Gateway configuration](/gateway/configuration) -- all config options
- [Updating](/install/updating) -- keep OpenClaw up to date

View File

@ -36,6 +36,10 @@ openclaw nodes status
openclaw nodes describe --node <idOrNameOrIp>
```
If a node retries with changed auth details (role/scopes/public key), the prior
pending request is superseded and a new `requestId` is created. Re-run
`openclaw devices list` before approving.
Notes:
- `nodes status` marks a node as **paired** when its device pairing role includes `node`.
@ -115,6 +119,9 @@ openclaw devices approve <requestId>
openclaw nodes status
```
If the node retries with changed auth details, re-run `openclaw devices list`
and approve the current `requestId`.
Naming options:
- `--display-name` on `openclaw node run` / `openclaw node install` (persists in `~/.openclaw/node.json` on the node).

View File

@ -42,6 +42,10 @@ openclaw devices list
openclaw devices approve <requestId>
```
If the app retries pairing with changed auth details (role/scopes/public key),
the previous pending request is superseded and a new `requestId` is created.
Run `openclaw devices list` again before approval.
4. Verify connection:
```bash

View File

@ -21,7 +21,7 @@ Native Linux companion apps are planned. Contributions are welcome if you want t
4. From your laptop: `ssh -N -L 18789:127.0.0.1:18789 <user>@<host>`
5. Open `http://127.0.0.1:18789/` and paste your token
Step-by-step VPS guide: [exe.dev](/install/exe-dev)
Full Linux server guide: [Linux Server](/vps). Step-by-step VPS example: [exe.dev](/install/exe-dev)
## Install

View File

@ -1,5 +1,5 @@
---
summary: "Use Xiaomi MiMo (mimo-v2-flash) with OpenClaw"
summary: "Use Xiaomi MiMo models with OpenClaw"
read_when:
- You want Xiaomi MiMo models in OpenClaw
- You need XIAOMI_API_KEY setup
@ -8,15 +8,18 @@ title: "Xiaomi MiMo"
# Xiaomi MiMo
Xiaomi MiMo is the API platform for **MiMo** models. It provides REST APIs compatible with
OpenAI and Anthropic formats and uses API keys for authentication. Create your API key in
the [Xiaomi MiMo console](https://platform.xiaomimimo.com/#/console/api-keys). OpenClaw uses
the `xiaomi` provider with a Xiaomi MiMo API key.
Xiaomi MiMo is the API platform for **MiMo** models. OpenClaw uses the Xiaomi
OpenAI-compatible endpoint with API-key authentication. Create your API key in the
[Xiaomi MiMo console](https://platform.xiaomimimo.com/#/console/api-keys), then configure the
bundled `xiaomi` provider with that key.
## Model overview
- **mimo-v2-flash**: 262144-token context window, Anthropic Messages API compatible.
- Base URL: `https://api.xiaomimimo.com/anthropic`
- **mimo-v2-flash**: default text model, 262144-token context window
- **mimo-v2-pro**: reasoning text model, 1048576-token context window
- **mimo-v2-omni**: reasoning multimodal model with text and image input, 262144-token context window
- Base URL: `https://api.xiaomimimo.com/v1`
- API: `openai-completions`
- Authorization: `Bearer $XIAOMI_API_KEY`
## CLI setup
@ -37,8 +40,8 @@ openclaw onboard --auth-choice xiaomi-api-key --xiaomi-api-key "$XIAOMI_API_KEY"
mode: "merge",
providers: {
xiaomi: {
baseUrl: "https://api.xiaomimimo.com/anthropic",
api: "anthropic-messages",
baseUrl: "https://api.xiaomimimo.com/v1",
api: "openai-completions",
apiKey: "XIAOMI_API_KEY",
models: [
{
@ -50,6 +53,24 @@ openclaw onboard --auth-choice xiaomi-api-key --xiaomi-api-key "$XIAOMI_API_KEY"
contextWindow: 262144,
maxTokens: 8192,
},
{
id: "mimo-v2-pro",
name: "Xiaomi MiMo V2 Pro",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 32000,
},
{
id: "mimo-v2-omni",
name: "Xiaomi MiMo V2 Omni",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 262144,
maxTokens: 32000,
},
],
},
},
@ -59,6 +80,7 @@ openclaw onboard --auth-choice xiaomi-api-key --xiaomi-api-key "$XIAOMI_API_KEY"
## Notes
- Model ref: `xiaomi/mimo-v2-flash`.
- Default model ref: `xiaomi/mimo-v2-flash`.
- Additional built-in models: `xiaomi/mimo-v2-pro`, `xiaomi/mimo-v2-omni`.
- The provider is injected automatically when `XIAOMI_API_KEY` is set (or an auth profile exists).
- See [/concepts/model-providers](/concepts/model-providers) for provider rules.

View File

@ -1,6 +1,5 @@
---
title: "Showcase"
description: "Real-world OpenClaw projects from the community"
summary: "Community-built projects and integrations powered by OpenClaw"
read_when:
- Looking for real OpenClaw usage examples

View File

@ -1,7 +1,6 @@
---
title: "Diffs"
summary: "Read-only diff viewer and file renderer for agents (optional plugin tool)"
description: "Use the optional Diffs plugin to render before and after text or unified patches as a gateway-hosted diff view, a file (PNG or PDF), or both."
read_when:
- You want agents to show code or markdown edits as diffs
- You want a canvas-ready viewer URL or a rendered diff file

View File

@ -1,7 +1,6 @@
---
title: Lobster
summary: "Typed workflow runtime for OpenClaw with resumable approval gates."
description: Typed workflow runtime for OpenClaw — composable pipelines with approval gates.
read_when:
- You want deterministic multi-step workflows with explicit approvals
- You need to resume a workflow without re-running earlier steps

View File

@ -1,6 +1,5 @@
---
title: "Tool-loop detection"
description: "Configure optional guardrails for preventing repetitive or stalled tool-call loops"
summary: "How to enable and tune guardrails that detect repetitive tool-call loops"
read_when:
- A user reports agents getting stuck repeating tool calls

View File

@ -1,26 +1,32 @@
---
summary: "VPS hosting hub for OpenClaw (Oracle/Fly/Hetzner/GCP/Azure/exe.dev)"
summary: "Run OpenClaw on a Linux server or cloud VPS — provider picker, architecture, and tuning"
read_when:
- You want to run the Gateway in the cloud
- You need a quick map of VPS/hosting guides
title: "VPS Hosting"
- You want to run the Gateway on a Linux server or cloud VPS
- You need a quick map of hosting guides
- You want generic Linux server tuning for OpenClaw
title: "Linux Server"
sidebarTitle: "Linux Server"
---
# VPS Hosting
# Linux Server
Run the OpenClaw Gateway around the clock on a cloud VPS. This page helps you pick a provider, explains how cloud deployments work, and covers generic Linux server tuning that applies to every provider.
Run the OpenClaw Gateway on any Linux server or cloud VPS. This page helps you
pick a provider, explains how cloud deployments work, and covers generic Linux
tuning that applies everywhere.
## Pick a provider
<CardGroup cols={2}>
<Card title="Railway" href="/install/railway">One-click, browser setup</Card>
<Card title="Northflank" href="/install/northflank">One-click, browser setup</Card>
<Card title="Oracle Cloud" href="/platforms/oracle">Always Free ARM tier ($0/month, capacity can be finicky)</Card>
<Card title="DigitalOcean" href="/install/digitalocean">Simple paid VPS</Card>
<Card title="Oracle Cloud" href="/install/oracle">Always Free ARM tier</Card>
<Card title="Fly.io" href="/install/fly">Fly Machines</Card>
<Card title="Hetzner" href="/install/hetzner">Docker on Hetzner VPS</Card>
<Card title="GCP" href="/install/gcp">Compute Engine</Card>
<Card title="Azure" href="/install/azure">Linux VM</Card>
<Card title="exe.dev" href="/install/exe-dev">VM with HTTPS proxy</Card>
<Card title="Raspberry Pi" href="/install/raspberry-pi">ARM self-hosted</Card>
</CardGroup>
**AWS (EC2 / Lightsail / free tier)** also works well.
@ -72,7 +78,7 @@ source ~/.bashrc
- `NODE_COMPILE_CACHE` improves repeated command startup times.
- `OPENCLAW_NO_RESPAWN=1` avoids extra startup overhead from a self-respawn path.
- First command run warms the cache; subsequent runs are faster.
- For Raspberry Pi specifics, see [Raspberry Pi](/platforms/raspberry-pi).
- For Raspberry Pi specifics, see [Raspberry Pi](/install/raspberry-pi).
### systemd tuning checklist (optional)

View File

@ -49,6 +49,10 @@ openclaw devices list
openclaw devices approve <requestId>
```
If the browser retries pairing with changed auth details (role/scopes/public
key), the previous pending request is superseded and a new `requestId` is
created. Re-run `openclaw devices list` before approval.
Once approved, the device is remembered and won't require re-approval unless
you revoke it with `openclaw devices revoke --device <id> --role <role>`. See
[Devices CLI](/cli/devices) for token rotation and revocation.

View File

@ -2,28 +2,31 @@
read_when:
- 你想在 OpenClaw 中使用 Xiaomi MiMo 模型
- 你需要设置 `XIAOMI_API_KEY`
summary: 在 OpenClaw 中使用 Xiaomi MiMo`mimo-v2-flash`
summary: 在 OpenClaw 中使用 Xiaomi MiMo 模型
title: Xiaomi MiMo
x-i18n:
generated_at: "2026-03-16T06:27:26Z"
generated_at: "2026-03-20T01:18:00Z"
model: gpt-5.4
provider: openai
source_hash: 366fd2297b2caf8c5ad944d7f1b6d233b248fe43aedd22a28352ae7f370d2435
source_hash: e0abfbe49f438807ce1c5cf5d7910e930c0d670f447f6eb53ca4e9af61cc0843
source_path: providers/xiaomi.md
workflow: 15
---
# Xiaomi MiMo
Xiaomi MiMo 是 **MiMo** 模型的 API 平台。它提供与
OpenAI 和 Anthropic 格式兼容的 REST API并使用 API key 进行认证。请在
[Xiaomi MiMo console](https://platform.xiaomimimo.com/#/console/api-keys) 中创建你的 API key。OpenClaw 使用
`xiaomi` 提供商配合 Xiaomi MiMo API key
Xiaomi MiMo 是 **MiMo** 模型的 API 平台。OpenClaw 使用 Xiaomi 提供的
OpenAI 兼容端点,并通过 API key 认证。请在
[Xiaomi MiMo console](https://platform.xiaomimimo.com/#/console/api-keys) 中创建你的 API key,然后用它配置内置的
`xiaomi` 提供商。
## 模型概览
- **mimo-v2-flash**262144-token 上下文窗口,兼容 Anthropic Messages API。
- Base URL`https://api.xiaomimimo.com/anthropic`
- **mimo-v2-flash**默认文本模型262144-token 上下文窗口
- **mimo-v2-pro**支持推理的文本模型1048576-token 上下文窗口
- **mimo-v2-omni**支持推理的多模态模型支持文本和图像输入262144-token 上下文窗口
- Base URL`https://api.xiaomimimo.com/v1`
- API`openai-completions`
- 认证方式:`Bearer $XIAOMI_API_KEY`
## CLI 设置
@ -44,8 +47,8 @@ openclaw onboard --auth-choice xiaomi-api-key --xiaomi-api-key "$XIAOMI_API_KEY"
mode: "merge",
providers: {
xiaomi: {
baseUrl: "https://api.xiaomimimo.com/anthropic",
api: "anthropic-messages",
baseUrl: "https://api.xiaomimimo.com/v1",
api: "openai-completions",
apiKey: "XIAOMI_API_KEY",
models: [
{
@ -57,6 +60,24 @@ openclaw onboard --auth-choice xiaomi-api-key --xiaomi-api-key "$XIAOMI_API_KEY"
contextWindow: 262144,
maxTokens: 8192,
},
{
id: "mimo-v2-pro",
name: "Xiaomi MiMo V2 Pro",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 32000,
},
{
id: "mimo-v2-omni",
name: "Xiaomi MiMo V2 Omni",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 262144,
maxTokens: 32000,
},
],
},
},
@ -66,6 +87,7 @@ openclaw onboard --auth-choice xiaomi-api-key --xiaomi-api-key "$XIAOMI_API_KEY"
## 说明
- 模型引用:`xiaomi/mimo-v2-flash`
- 默认模型引用:`xiaomi/mimo-v2-flash`
- 额外内置模型:`xiaomi/mimo-v2-pro``xiaomi/mimo-v2-omni`
- 当设置了 `XIAOMI_API_KEY`(或存在凭证配置文件)时,提供商会自动注入。
- 有关提供商规则,请参阅 [/concepts/model-providers](/concepts/model-providers)。

View File

@ -0,0 +1,41 @@
import { describe, expect, it } from "vitest";
import { formatPendingRequests, type PendingPairingRequest } from "./notify.ts";
describe("device-pair notify pending formatting", () => {
  it("includes role and scopes for pending requests", () => {
    // A fully-populated request: every optional display field is present.
    const request: PendingPairingRequest = {
      requestId: "req-1",
      deviceId: "device-1",
      displayName: "dev one",
      platform: "ios",
      role: "operator",
      scopes: ["operator.admin", "operator.read"],
      remoteIp: "198.51.100.2",
    };
    const rendered = formatPendingRequests([request]);
    const expectedFragments = [
      "Pending device pairing requests:",
      "name=dev one",
      "platform=ios",
      "role=operator",
      "scopes=operator.admin, operator.read",
      "ip=198.51.100.2",
    ];
    for (const fragment of expectedFragments) {
      expect(rendered).toContain(fragment);
    }
  });

  it("falls back to roles list and no scopes when role/scopes are absent", () => {
    // No singular `role` and an empty `scopes` array: output should use the
    // plural `roles` list and render scopes as "none".
    const rendered = formatPendingRequests([
      {
        requestId: "req-2",
        deviceId: "device-2",
        roles: ["node", "operator"],
        scopes: [],
      },
    ]);
    expect(rendered).toContain("role=node, operator");
    expect(rendered).toContain("scopes=none");
  });
});

View File

@ -25,10 +25,33 @@ export type PendingPairingRequest = {
deviceId: string;
displayName?: string;
platform?: string;
role?: string;
roles?: string[];
scopes?: string[];
remoteIp?: string;
ts?: number;
};
/**
 * Joins trimmed, non-empty entries with ", " for display.
 * Returns "none" when the list is missing, empty, or contains only blanks.
 */
function formatStringList(values?: readonly string[]): string {
  const entries: string[] = [];
  for (const raw of Array.isArray(values) ? values : []) {
    const trimmed = raw.trim();
    if (trimmed.length > 0) {
      entries.push(trimmed);
    }
  }
  return entries.length > 0 ? entries.join(", ") : "none";
}
/**
 * Renders a request's role for display: the singular `role` field wins when
 * it is a non-blank string; otherwise the plural `roles` list is formatted
 * (falling back to "none" when that is absent or empty too).
 */
function formatRoleList(request: PendingPairingRequest): string {
  const singleRole = request.role?.trim();
  return singleRole ? singleRole : formatStringList(request.roles);
}
/** Summarize a request's scopes; formatStringList yields "none" when empty/absent. */
function formatScopeList(request: PendingPairingRequest): string {
  const { scopes } = request;
  return formatStringList(scopes);
}
export function formatPendingRequests(pending: PendingPairingRequest[]): string {
if (pending.length === 0) {
return "No pending device pairing requests.";
@ -42,6 +65,8 @@ export function formatPendingRequests(pending: PendingPairingRequest[]): string
`- ${req.requestId}`,
label ? `name=${label}` : null,
platform ? `platform=${platform}` : null,
`role=${formatRoleList(req)}`,
`scopes=${formatScopeList(req)}`,
ip ? `ip=${ip}` : null,
].filter(Boolean);
lines.push(parts.join(" · "));
@ -182,11 +207,15 @@ function buildPairingRequestNotificationText(request: PendingPairingRequest): st
const label = request.displayName?.trim() || request.deviceId;
const platform = request.platform?.trim();
const ip = request.remoteIp?.trim();
const role = formatRoleList(request);
const scopes = formatScopeList(request);
const lines = [
"📲 New device pairing request",
`ID: ${request.requestId}`,
`Name: ${label}`,
...(platform ? [`Platform: ${platform}`] : []),
`Role: ${role}`,
`Scopes: ${scopes}`,
...(ip ? [`IP: ${ip}`] : []),
"",
`Approve: /pair approve ${request.requestId}`,

View File

@ -4,7 +4,7 @@
"description": "OpenClaw Discord channel plugin",
"type": "module",
"dependencies": {
"@buape/carbon": "0.0.0-beta-20260216184201",
"@buape/carbon": "0.0.0-beta-20260317045421",
"@discordjs/voice": "^0.19.2",
"discord-api-types": "^0.38.42",
"https-proxy-agent": "^8.0.0",

View File

@ -3,12 +3,12 @@ import {
hasConfiguredSecretInput,
normalizeSecretInputString,
} from "openclaw/plugin-sdk/config-runtime";
import type { DiscordAccountConfig, OpenClawConfig } from "openclaw/plugin-sdk/discord-core";
import {
mergeDiscordAccountConfig,
resolveDefaultDiscordAccountId,
resolveDiscordAccountConfig,
} from "./accounts.js";
import type { DiscordAccountConfig, OpenClawConfig } from "./runtime-api.js";
export type DiscordCredentialStatus = "available" | "configured_unavailable" | "missing";

View File

@ -3,12 +3,8 @@ import {
createAccountListHelpers,
} from "openclaw/plugin-sdk/account-helpers";
import { normalizeAccountId } from "openclaw/plugin-sdk/account-id";
import type {
DiscordAccountConfig,
DiscordActionConfig,
OpenClawConfig,
} from "openclaw/plugin-sdk/discord-core";
import { resolveAccountEntry } from "openclaw/plugin-sdk/routing";
import type { DiscordAccountConfig, DiscordActionConfig, OpenClawConfig } from "./runtime-api.js";
import { resolveDiscordToken } from "./token.js";
export type ResolvedDiscordAccount = {

View File

@ -1,5 +1,7 @@
import { discordSetupWizard as discordSetupWizardImpl } from "./setup-surface.js";
import { createDiscordSetupWizardProxy } from "./setup-core.js";
type DiscordSetupWizard = typeof import("./setup-surface.js").discordSetupWizard;
export const discordSetupWizard: DiscordSetupWizard = { ...discordSetupWizardImpl };
export const discordSetupWizard: DiscordSetupWizard = createDiscordSetupWizardProxy(
async () => (await import("./setup-surface.js")).discordSetupWizard,
);

View File

@ -88,11 +88,25 @@ describe("monitorDiscordProvider", () => {
const getConstructedEventQueue = (): { listenerTimeout?: number } | undefined => {
expect(clientConstructorOptionsMock).toHaveBeenCalledTimes(1);
const opts = clientConstructorOptionsMock.mock.calls[0]?.[0] as {
commandDeploymentMode?: string;
eventQueue?: { listenerTimeout?: number };
};
return opts.eventQueue;
};
const getConstructedClientOptions = (): {
commandDeploymentMode?: string;
eventQueue?: { listenerTimeout?: number };
} => {
expect(clientConstructorOptionsMock).toHaveBeenCalledTimes(1);
return (
(clientConstructorOptionsMock.mock.calls[0]?.[0] as {
commandDeploymentMode?: string;
eventQueue?: { listenerTimeout?: number };
}) ?? {}
);
};
const getHealthProbe = () => {
expect(reconcileAcpThreadBindingsOnStartupMock).toHaveBeenCalledTimes(1);
const firstCall = reconcileAcpThreadBindingsOnStartupMock.mock.calls.at(0) as
@ -539,6 +553,18 @@ describe("monitorDiscordProvider", () => {
);
});
it("configures Carbon reconcile deployment by default", async () => {
const { monitorDiscordProvider } = await import("./provider.js");
await monitorDiscordProvider({
config: baseConfig(),
runtime: baseRuntime(),
});
expect(clientHandleDeployRequestMock).toHaveBeenCalledTimes(1);
expect(getConstructedClientOptions().commandDeploymentMode).toBe("reconcile");
});
it("reports connected status on startup and shutdown", async () => {
const { monitorDiscordProvider } = await import("./provider.js");
const setStatus = vi.fn();

View File

@ -306,6 +306,7 @@ async function deployDiscordCommands(params: {
// errors like Discord 30034 fail fast and don't wedge the provider.
restClient.options.queueRequests = false;
}
params.runtime.log?.("discord: native commands using Carbon reconcile path");
for (let attempt = 1; attempt <= maxAttempts; attempt += 1) {
try {
await params.client.handleDeployRequest();
@ -762,6 +763,7 @@ export async function monitorDiscordProvider(opts: MonitorDiscordOpts = {}) {
baseUrl: "http://localhost",
deploySecret: "a",
clientId: applicationId,
commandDeploymentMode: "reconcile",
publicKey: "a",
token,
autoDeploy: false,
@ -805,7 +807,7 @@ export async function monitorDiscordProvider(opts: MonitorDiscordOpts = {}) {
phase: "deploy-commands:start",
startAt: startupStartedAt,
gateway: lifecycleGateway,
details: `native=${nativeEnabled ? "on" : "off"} commandCount=${commands.length}`,
details: `native=${nativeEnabled ? "on" : "off"} reconcile=on commandCount=${commands.length}`,
});
await deployDiscordCommands({
client,

View File

@ -0,0 +1,163 @@
import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "openclaw/plugin-sdk/account-id";
import {
hasConfiguredSecretInput,
normalizeSecretInputString,
type OpenClawConfig,
} from "openclaw/plugin-sdk/config-runtime";
import type { DiscordAccountConfig } from "./runtime-api.js";
import { resolveDiscordToken } from "./token.js";
export type InspectedDiscordSetupAccount = {
accountId: string;
enabled: boolean;
token: string;
tokenSource: "env" | "config" | "none";
tokenStatus: "available" | "configured_unavailable" | "missing";
configured: boolean;
config: DiscordAccountConfig;
};
/**
 * Look up the per-account Discord config entry for `accountId`.
 *
 * Returns undefined when the accounts map is missing or malformed
 * (non-object / array). Keys are matched after normalization: a direct hit
 * on the normalized id is tried first, then a scan over all keys comparing
 * their normalized forms.
 */
function resolveDiscordAccountEntry(
  cfg: OpenClawConfig,
  accountId: string,
): DiscordAccountConfig | undefined {
  const accounts = cfg.channels?.discord?.accounts;
  if (!accounts || typeof accounts !== "object" || Array.isArray(accounts)) {
    return undefined;
  }
  const wanted = normalizeAccountId(accountId);
  if (accounts[wanted]) {
    return accounts[wanted];
  }
  for (const key of Object.keys(accounts)) {
    if (normalizeAccountId(key) === wanted) {
      return accounts[key];
    }
  }
  return undefined;
}
/**
 * Classify a token value read from config.
 *
 * - Normalizes to a usable secret string → "available", with any leading
 *   "Bot " auth prefix stripped from the token.
 * - Looks configured but yields no usable string (per
 *   hasConfiguredSecretInput) → "configured_unavailable" with an empty token.
 * - Otherwise → null: nothing is configured at this level.
 */
function inspectConfiguredToken(value: unknown): {
  token: string;
  tokenSource: "config";
  tokenStatus: "available" | "configured_unavailable";
} | null {
  const secret = normalizeSecretInputString(value);
  if (secret) {
    // Tokens are sometimes pasted with the "Bot " prefix used in auth headers.
    const token = secret.replace(/^Bot\s+/i, "");
    return { token, tokenSource: "config", tokenStatus: "available" };
  }
  return hasConfiguredSecretInput(value)
    ? { token: "", tokenSource: "config", tokenStatus: "configured_unavailable" }
    : null;
}
/**
 * List all Discord account ids known to setup, normalized and de-duplicated.
 * The default account id is always included and always comes first; a
 * malformed accounts map (non-object / array) contributes nothing.
 */
export function listDiscordSetupAccountIds(cfg: OpenClawConfig): string[] {
  const unique = new Set<string>([DEFAULT_ACCOUNT_ID]);
  const accounts = cfg.channels?.discord?.accounts;
  if (accounts && typeof accounts === "object" && !Array.isArray(accounts)) {
    for (const key of Object.keys(accounts)) {
      const normalized = normalizeAccountId(key);
      if (normalized) {
        unique.add(normalized);
      }
    }
  }
  return [...unique];
}
/**
 * The account id setup should use by default: the first entry of the account
 * list (which always starts with DEFAULT_ACCOUNT_ID; the fallback is purely
 * defensive).
 */
export function resolveDefaultDiscordSetupAccountId(cfg: OpenClawConfig): string {
  const [first] = listDiscordSetupAccountIds(cfg);
  return first ?? DEFAULT_ACCOUNT_ID;
}
/**
 * Resolve the effective Discord config for one account.
 *
 * Starts from the channel-level config (minus the `accounts` map itself) and
 * overlays the matching per-account entry, so account-specific settings win
 * over channel-wide defaults. A missing accountId resolves to the default.
 */
export function resolveDiscordSetupAccountConfig(params: {
  cfg: OpenClawConfig;
  accountId?: string | null;
}): { accountId: string; config: DiscordAccountConfig } {
  const accountId = normalizeAccountId(params.accountId ?? DEFAULT_ACCOUNT_ID);
  const channelLevel = (params.cfg.channels?.discord ?? {}) as DiscordAccountConfig & {
    accounts?: unknown;
  };
  // Strip the accounts map so it never leaks into a single account's config.
  const { accounts: _accounts, ...channelDefaults } = channelLevel;
  const accountEntry = resolveDiscordAccountEntry(params.cfg, accountId) ?? {};
  return { accountId, config: { ...channelDefaults, ...accountEntry } };
}
/**
 * Inspect one Discord account's setup state: effective config, enabled flag,
 * and where (if anywhere) a usable bot token comes from.
 *
 * Token precedence mirrors the early returns below:
 *   1. a `token` key on the per-account entry — even an unusable value pins
 *      precedence here (status "missing" rather than falling through);
 *   2. a token on the channel-level config;
 *   3. whatever resolveDiscordToken reports (its `source` feeds tokenSource,
 *      e.g. "env" — see the InspectedDiscordSetupAccount type).
 */
export function inspectDiscordSetupAccount(params: {
cfg: OpenClawConfig;
accountId?: string | null;
}): InspectedDiscordSetupAccount {
const { accountId, config } = resolveDiscordSetupAccountConfig(params);
// Enabled only when neither the channel nor the merged account config opts out.
const enabled = params.cfg.channels?.discord?.enabled !== false && config.enabled !== false;
const accountConfig = resolveDiscordAccountEntry(params.cfg, accountId);
// An own "token" property on the account entry means the account explicitly
// configures a token, regardless of whether its value is usable.
const hasAccountToken = Boolean(
accountConfig &&
Object.prototype.hasOwnProperty.call(accountConfig as Record<string, unknown>, "token"),
);
const accountToken = inspectConfiguredToken(accountConfig?.token);
if (accountToken) {
// Case 1a: account-level token is present and classifiable.
return {
accountId,
enabled,
token: accountToken.token,
tokenSource: accountToken.tokenSource,
tokenStatus: accountToken.tokenStatus,
configured: true,
config,
};
}
if (hasAccountToken) {
// Case 1b: the account declares a token key but nothing usable was found.
// Stop here — do NOT fall through to channel/env lookups.
return {
accountId,
enabled,
token: "",
tokenSource: "none",
tokenStatus: "missing",
configured: false,
config,
};
}
const channelToken = inspectConfiguredToken(params.cfg.channels?.discord?.token);
if (channelToken) {
// Case 2: channel-level configured token.
return {
accountId,
enabled,
token: channelToken.token,
tokenSource: channelToken.tokenSource,
tokenStatus: channelToken.tokenStatus,
configured: true,
config,
};
}
const tokenResolution = resolveDiscordToken(params.cfg, { accountId });
if (tokenResolution.token) {
// Case 3: resolver found a token elsewhere (source reported by resolver).
return {
accountId,
enabled,
token: tokenResolution.token,
tokenSource: tokenResolution.source,
tokenStatus: "available",
configured: true,
config,
};
}
// Nothing configured anywhere.
return {
accountId,
enabled,
token: "",
tokenSource: "none",
tokenStatus: "missing",
configured: false,
config,
};
}

View File

@ -1,24 +1,27 @@
import { DEFAULT_ACCOUNT_ID } from "openclaw/plugin-sdk/account-id";
import type { DiscordGuildEntry } from "openclaw/plugin-sdk/config-runtime";
import type { OpenClawConfig } from "openclaw/plugin-sdk/config-runtime";
import { createEnvPatchedAccountSetupAdapter } from "openclaw/plugin-sdk/setup-adapter-runtime";
import type {
ChannelSetupAdapter,
ChannelSetupDmPolicy,
ChannelSetupWizard,
} from "openclaw/plugin-sdk/setup-runtime";
import { formatDocsLink } from "openclaw/plugin-sdk/setup-tools";
import {
inspectDiscordSetupAccount,
listDiscordSetupAccountIds,
resolveDiscordSetupAccountConfig,
} from "./setup-account-state.js";
import {
createAccountScopedAllowFromSection,
createAccountScopedGroupAccessSection,
createAllowlistSetupWizardProxy,
createLegacyCompatChannelDmPolicy,
DEFAULT_ACCOUNT_ID,
createEnvPatchedAccountSetupAdapter,
parseMentionOrPrefixedId,
patchChannelConfigForAccount,
setSetupChannelEnabled,
type OpenClawConfig,
} from "openclaw/plugin-sdk/setup";
import {
createAllowlistSetupWizardProxy,
type ChannelSetupAdapter,
type ChannelSetupDmPolicy,
type ChannelSetupWizard,
} from "openclaw/plugin-sdk/setup";
import { formatDocsLink } from "openclaw/plugin-sdk/setup-tools";
import { inspectDiscordAccount } from "./account-inspect.js";
import { listDiscordAccountIds, resolveDiscordAccount } from "./accounts.js";
} from "./setup-runtime-helpers.js";
const channel = "discord" as const;
@ -104,8 +107,8 @@ export function createDiscordSetupWizardBase(handlers: {
configuredScore: 2,
unconfiguredScore: 1,
resolveConfigured: ({ cfg }) =>
listDiscordAccountIds(cfg).some((accountId) => {
const account = inspectDiscordAccount({ cfg, accountId });
listDiscordSetupAccountIds(cfg).some((accountId) => {
const account = inspectDiscordSetupAccount({ cfg, accountId });
return account.configured;
}),
},
@ -122,7 +125,7 @@ export function createDiscordSetupWizardBase(handlers: {
inputPrompt: "Enter Discord bot token",
allowEnv: ({ accountId }: { accountId: string }) => accountId === DEFAULT_ACCOUNT_ID,
inspect: ({ cfg, accountId }: { cfg: OpenClawConfig; accountId: string }) => {
const account = inspectDiscordAccount({ cfg, accountId });
const account = inspectDiscordSetupAccount({ cfg, accountId });
return {
accountConfigured: account.configured,
hasConfiguredValue: account.tokenStatus !== "missing",
@ -136,25 +139,24 @@ export function createDiscordSetupWizardBase(handlers: {
},
],
groupAccess: createAccountScopedGroupAccessSection({
channel,
label: "Discord channels",
placeholder: "My Server/#general, guildId/channelId, #support",
currentPolicy: ({ cfg, accountId }: { cfg: OpenClawConfig; accountId: string }) =>
resolveDiscordAccount({ cfg, accountId }).config.groupPolicy ?? "allowlist",
resolveDiscordSetupAccountConfig({ cfg, accountId }).config.groupPolicy ?? "allowlist",
currentEntries: ({ cfg, accountId }: { cfg: OpenClawConfig; accountId: string }) =>
Object.entries(resolveDiscordAccount({ cfg, accountId }).config.guilds ?? {}).flatMap(
([guildKey, value]) => {
const channels = value?.channels ?? {};
const channelKeys = Object.keys(channels);
if (channelKeys.length === 0) {
const input = /^\d+$/.test(guildKey) ? `guild:${guildKey}` : guildKey;
return [input];
}
return channelKeys.map((channelKey) => `${guildKey}/${channelKey}`);
},
),
Object.entries(
resolveDiscordSetupAccountConfig({ cfg, accountId }).config.guilds ?? {},
).flatMap(([guildKey, value]) => {
const channels = value?.channels ?? {};
const channelKeys = Object.keys(channels);
if (channelKeys.length === 0) {
const input = /^\d+$/.test(guildKey) ? `guild:${guildKey}` : guildKey;
return [input];
}
return channelKeys.map((channelKey) => `${guildKey}/${channelKey}`);
}),
updatePrompt: ({ cfg, accountId }: { cfg: OpenClawConfig; accountId: string }) =>
Boolean(resolveDiscordAccount({ cfg, accountId }).config.guilds),
Boolean(resolveDiscordSetupAccountConfig({ cfg, accountId }).config.guilds),
resolveAllowlist: handlers.resolveGroupAllowlist,
fallbackResolved: (entries) => entries.map((input) => ({ input, resolved: false })),
applyAllowlist: ({
@ -168,7 +170,6 @@ export function createDiscordSetupWizardBase(handlers: {
}) => setDiscordGuildChannelAllowlist(cfg, accountId, resolved as never),
}),
allowFrom: createAccountScopedAllowFromSection({
channel,
credentialInputKey: "token",
helpTitle: "Discord allowlist",
helpLines: [

View File

@ -0,0 +1,436 @@
import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "openclaw/plugin-sdk/account-id";
import type { OpenClawConfig } from "openclaw/plugin-sdk/config-runtime";
import type {
ChannelSetupDmPolicy,
ChannelSetupWizard,
WizardPrompter,
} from "openclaw/plugin-sdk/setup-runtime";
import {
resolveDefaultDiscordSetupAccountId,
resolveDiscordSetupAccountConfig,
} from "./setup-account-state.js";
/**
 * Extract an id from user input that may be a mention, a prefixed id, or a
 * bare id.
 *
 * Precedence:
 *   1. `mentionPattern` — its first capture group becomes the id.
 *   2. `prefixPattern` (optional) — the prefix is stripped and the remainder
 *      must satisfy `idPattern`.
 *   3. Bare input — must satisfy `idPattern` as-is.
 *
 * Returns null for blank input or failed validation. When `normalizeId` is
 * provided it is applied to whichever id was extracted.
 */
export function parseMentionOrPrefixedId(params: {
  value: string;
  mentionPattern: RegExp;
  prefixPattern?: RegExp;
  idPattern: RegExp;
  normalizeId?: (id: string) => string;
}): string | null {
  const normalize = (id: string): string => (params.normalizeId ? params.normalizeId(id) : id);
  const input = params.value.trim();
  if (!input) {
    return null;
  }
  const mentionId = input.match(params.mentionPattern)?.[1];
  if (mentionId) {
    return normalize(mentionId);
  }
  if (params.prefixPattern?.test(input)) {
    const remainder = input.replace(params.prefixPattern, "").trim();
    return remainder && params.idPattern.test(remainder) ? normalize(remainder) : null;
  }
  return params.idPattern.test(input) ? normalize(input) : null;
}
/** Split raw prompt input on newlines/commas/semicolons, trimming and dropping blanks. */
function splitSetupEntries(raw: string): string[] {
  const entries: string[] = [];
  for (const piece of raw.split(/[\n,;]+/g)) {
    const entry = piece.trim();
    if (entry) {
      entries.push(entry);
    }
  }
  return entries;
}
/**
 * Merge existing allowFrom entries with new additions.
 * Every value is stringified and trimmed; blanks are dropped and duplicates
 * removed while preserving first-seen order.
 */
function mergeAllowFromEntries(
  current: Array<string | number> | null | undefined,
  additions: Array<string | number>,
): string[] {
  const seen = new Set<string>();
  for (const value of [...(current ?? []), ...additions]) {
    const text = String(value).trim();
    if (text) {
      seen.add(text);
    }
  }
  return [...seen];
}
/**
 * Apply a config patch to one Discord account, marking the channel enabled.
 *
 * For the default account the patch lands directly on the channel-level
 * config; for any other account it lands on `accounts[accountId]` (with both
 * the channel and that account marked enabled). A new config object is
 * returned; the input is never mutated.
 */
function patchDiscordChannelConfigForAccount(params: {
  cfg: OpenClawConfig;
  accountId: string;
  patch: Record<string, unknown>;
}): OpenClawConfig {
  const { cfg, patch } = params;
  const accountId = normalizeAccountId(params.accountId);
  const channelConfig = (cfg.channels?.discord as Record<string, unknown> | undefined) ?? {};
  // Helper to rebuild the top-level config around a replacement discord node.
  const withDiscord = (discord: Record<string, unknown>) => ({
    ...cfg,
    channels: {
      ...cfg.channels,
      discord,
    },
  });
  if (accountId === DEFAULT_ACCOUNT_ID) {
    return withDiscord({ ...channelConfig, ...patch, enabled: true });
  }
  const accounts =
    (channelConfig.accounts as Record<string, Record<string, unknown>> | undefined) ?? {};
  const existingAccount = accounts[accountId] ?? {};
  return withDiscord({
    ...channelConfig,
    enabled: true,
    accounts: {
      ...accounts,
      [accountId]: { ...existingAccount, ...patch, enabled: true },
    },
  });
}
/**
 * Return a copy of the config with `channels.<channel>.enabled` set to the
 * given value. Existing channel settings are preserved; nothing is mutated.
 */
export function setSetupChannelEnabled(
  cfg: OpenClawConfig,
  channel: string,
  enabled: boolean,
): OpenClawConfig {
  const existing = (cfg.channels?.[channel] as Record<string, unknown> | undefined) ?? {};
  const channels = {
    ...cfg.channels,
    [channel]: { ...existing, enabled },
  };
  return { ...cfg, channels };
}
/**
 * Channel-generic wrapper kept for API symmetry; only "discord" is accepted
 * here, so it simply forwards to the Discord-specific patch helper.
 */
export function patchChannelConfigForAccount(params: {
  cfg: OpenClawConfig;
  channel: "discord";
  accountId: string;
  patch: Record<string, unknown>;
}): OpenClawConfig {
  const { cfg, accountId, patch } = params;
  return patchDiscordChannelConfigForAccount({ cfg, accountId, patch });
}
/**
 * Build a ChannelSetupDmPolicy that understands both the current (`dmPolicy`)
 * and legacy (`dm.policy`) config shapes.
 *
 * - `getCurrent` prefers `dmPolicy`, falls back to the legacy `dm.policy`,
 *   and defaults to "pairing".
 * - `setPolicy` writes through the default account; choosing "open"
 *   additionally appends "*" to the existing allowFrom list (stringified,
 *   trimmed, blanks dropped, de-duplicated).
 *
 * The original inlined the same channel-config cast three times; it is
 * factored into `readChannel` here so the legacy/current shape is declared
 * once. Behavior is unchanged.
 */
export function createLegacyCompatChannelDmPolicy(params: {
  label: string;
  channel: "discord";
  promptAllowFrom?: ChannelSetupDmPolicy["promptAllowFrom"];
}): ChannelSetupDmPolicy {
  type DmPolicyValue = "open" | "pairing" | "allowlist";
  // Single typed accessor for the channel config fields this policy reads.
  const readChannel = (cfg: { channels?: Record<string, unknown> } | undefined) =>
    cfg?.channels?.[params.channel] as
      | {
          dmPolicy?: DmPolicyValue;
          dm?: { policy?: DmPolicyValue };
          allowFrom?: Array<string | number>;
        }
      | undefined;
  // "open" means everyone: merge "*" into whatever allowFrom already holds.
  const openAllowFrom = (cfg: { channels?: Record<string, unknown> }): string[] => [
    ...new Set(
      [...(readChannel(cfg)?.allowFrom ?? []), "*"]
        .map((value) => String(value).trim())
        .filter(Boolean),
    ),
  ];
  return {
    label: params.label,
    channel: params.channel,
    policyKey: `channels.${params.channel}.dmPolicy`,
    allowFromKey: `channels.${params.channel}.allowFrom`,
    getCurrent: (cfg) => {
      const channelConfig = readChannel(cfg);
      // Current shape first, then legacy nested shape, then the default.
      return channelConfig?.dmPolicy ?? channelConfig?.dm?.policy ?? "pairing";
    },
    setPolicy: (cfg, policy) =>
      patchDiscordChannelConfigForAccount({
        cfg,
        accountId: DEFAULT_ACCOUNT_ID,
        patch: {
          dmPolicy: policy,
          ...(policy === "open" ? { allowFrom: openAllowFrom(cfg) } : {}),
        },
      }),
    ...(params.promptAllowFrom ? { promptAllowFrom: params.promptAllowFrom } : {}),
  };
}
/** Surface a channel-lookup failure to the user without aborting the wizard flow. */
async function noteChannelLookupFailure(params: {
  prompter: Pick<WizardPrompter, "note">;
  label: string;
  error: unknown;
}) {
  const { prompter, label, error } = params;
  const message = `Channel lookup failed; keeping entries as typed. ${String(error)}`;
  await prompter.note(message, label);
}
/**
 * Build the account-scoped `allowFrom` wizard section.
 *
 * Prompt copy and parsing/resolution callbacks come from the caller; the
 * `apply` step is fixed: it switches the account to an allowlist DM policy
 * and stores the resolved entries. Optional help/credential fields are only
 * present on the result when truthy values were supplied.
 */
export function createAccountScopedAllowFromSection(params: {
  credentialInputKey?: NonNullable<ChannelSetupWizard["allowFrom"]>["credentialInputKey"];
  helpTitle?: string;
  helpLines?: string[];
  message: string;
  placeholder: string;
  invalidWithoutCredentialNote: string;
  parseId: NonNullable<NonNullable<ChannelSetupWizard["allowFrom"]>["parseId"]>;
  resolveEntries: NonNullable<NonNullable<ChannelSetupWizard["allowFrom"]>["resolveEntries"]>;
}): NonNullable<ChannelSetupWizard["allowFrom"]> {
  const section: NonNullable<ChannelSetupWizard["allowFrom"]> = {
    message: params.message,
    placeholder: params.placeholder,
    invalidWithoutCredentialNote: params.invalidWithoutCredentialNote,
    parseId: params.parseId,
    resolveEntries: params.resolveEntries,
    apply: ({ cfg, accountId, allowFrom }) =>
      patchDiscordChannelConfigForAccount({
        cfg,
        accountId,
        patch: { dmPolicy: "allowlist", allowFrom },
      }),
  };
  if (params.helpTitle) {
    section.helpTitle = params.helpTitle;
  }
  if (params.helpLines) {
    section.helpLines = params.helpLines;
  }
  if (params.credentialInputKey) {
    section.credentialInputKey = params.credentialInputKey;
  }
  return section;
}
/**
 * Build the account-scoped group-access wizard section.
 *
 * Display strings and the current-state/readback callbacks come from the
 * caller; `setPolicy` and `applyAllowlist` are wired to the account-scoped
 * Discord config patcher. When a `resolveAllowlist` callback is supplied it
 * is wrapped so lookup failures are reported to the user and the raw entries
 * (via `fallbackResolved`) are used instead of aborting the wizard.
 */
export function createAccountScopedGroupAccessSection<TResolved>(params: {
label: string;
placeholder: string;
helpTitle?: string;
helpLines?: string[];
skipAllowlistEntries?: boolean;
currentPolicy: NonNullable<ChannelSetupWizard["groupAccess"]>["currentPolicy"];
currentEntries: NonNullable<ChannelSetupWizard["groupAccess"]>["currentEntries"];
updatePrompt: NonNullable<ChannelSetupWizard["groupAccess"]>["updatePrompt"];
resolveAllowlist?: NonNullable<
NonNullable<ChannelSetupWizard["groupAccess"]>["resolveAllowlist"]
>;
fallbackResolved: (entries: string[]) => TResolved;
applyAllowlist: (params: {
cfg: OpenClawConfig;
accountId: string;
resolved: TResolved;
}) => OpenClawConfig;
}): NonNullable<ChannelSetupWizard["groupAccess"]> {
return {
label: params.label,
placeholder: params.placeholder,
// Optional fields are spread in only when truthy so they stay absent
// (not undefined) on the resulting section object.
...(params.helpTitle ? { helpTitle: params.helpTitle } : {}),
...(params.helpLines ? { helpLines: params.helpLines } : {}),
...(params.skipAllowlistEntries ? { skipAllowlistEntries: true } : {}),
currentPolicy: params.currentPolicy,
currentEntries: params.currentEntries,
updatePrompt: params.updatePrompt,
setPolicy: ({ cfg, accountId, policy }) =>
patchDiscordChannelConfigForAccount({
cfg,
accountId,
patch: { groupPolicy: policy },
}),
...(params.resolveAllowlist
? {
// Wrap the caller's resolver: on failure, tell the user and fall
// back to the unresolved entries rather than failing the wizard.
resolveAllowlist: async ({ cfg, accountId, credentialValues, entries, prompter }) => {
try {
return await params.resolveAllowlist!({
cfg,
accountId,
credentialValues,
entries,
prompter,
});
} catch (error) {
await noteChannelLookupFailure({
prompter,
label: params.label,
error,
});
return params.fallbackResolved(entries);
}
},
}
: {}),
// The cast narrows the wizard's generic resolved payload back to the
// caller's TResolved before handing it to their apply function.
applyAllowlist: ({ cfg, accountId, resolved }) =>
params.applyAllowlist({
cfg,
accountId,
resolved: resolved as TResolved,
}),
};
}
/**
 * Build a setup wizard whose allowlist-related callbacks lazily load the
 * real wizard implementation via `loadWizard` on first use.
 *
 * `createBase` receives three proxy handlers; each one awaits the loaded
 * wizard and delegates to the corresponding callback when it exists,
 * otherwise returns a safe fallback (the unchanged cfg, unresolved entries,
 * or `fallbackResolvedGroupAllowlist`). The `Awaited<ReturnType<…>>` casts
 * align the fallback/delegated values with the wizard's declared resolver
 * return type.
 */
export function createAllowlistSetupWizardProxy<TGroupResolved>(params: {
loadWizard: () => Promise<ChannelSetupWizard>;
createBase: (handlers: {
promptAllowFrom: NonNullable<ChannelSetupDmPolicy["promptAllowFrom"]>;
resolveAllowFromEntries: NonNullable<
NonNullable<ChannelSetupWizard["allowFrom"]>["resolveEntries"]
>;
resolveGroupAllowlist: NonNullable<
NonNullable<NonNullable<ChannelSetupWizard["groupAccess"]>["resolveAllowlist"]>
>;
}) => ChannelSetupWizard;
fallbackResolvedGroupAllowlist: (entries: string[]) => TGroupResolved;
}) {
return params.createBase({
// No promptAllowFrom on the loaded wizard → leave the config untouched.
promptAllowFrom: async ({ cfg, prompter, accountId }) => {
const wizard = await params.loadWizard();
if (!wizard.dmPolicy?.promptAllowFrom) {
return cfg;
}
return await wizard.dmPolicy.promptAllowFrom({ cfg, prompter, accountId });
},
// No allowFrom section → report every entry as unresolved with a null id.
resolveAllowFromEntries: async ({ cfg, accountId, credentialValues, entries }) => {
const wizard = await params.loadWizard();
if (!wizard.allowFrom) {
return entries.map((input) => ({ input, resolved: false, id: null }));
}
return await wizard.allowFrom.resolveEntries({
cfg,
accountId,
credentialValues,
entries,
});
},
// No group resolver → use the caller-provided fallback shape.
resolveGroupAllowlist: async ({ cfg, accountId, credentialValues, entries, prompter }) => {
const wizard = await params.loadWizard();
if (!wizard.groupAccess?.resolveAllowlist) {
return params.fallbackResolvedGroupAllowlist(entries) as Awaited<
ReturnType<
NonNullable<NonNullable<ChannelSetupWizard["groupAccess"]>["resolveAllowlist"]>
>
>;
}
return (await wizard.groupAccess.resolveAllowlist({
cfg,
accountId,
credentialValues,
entries,
prompter,
})) as Awaited<
ReturnType<NonNullable<NonNullable<ChannelSetupWizard["groupAccess"]>["resolveAllowlist"]>>
>;
},
});
}
/**
 * Resolve entries through the token-backed resolver when a usable (trimmed,
 * non-empty) token is present; otherwise map each raw entry through
 * `buildWithoutToken`.
 */
export async function resolveEntriesWithOptionalToken<TResult>(params: {
  token?: string | null;
  entries: string[];
  buildWithoutToken: (input: string) => TResult;
  resolveEntries: (params: { token: string; entries: string[] }) => Promise<TResult[]>;
}): Promise<TResult[]> {
  const { entries, buildWithoutToken, resolveEntries } = params;
  const token = params.token?.trim();
  if (token) {
    return await resolveEntries({ token, entries });
  }
  return entries.map((entry) => buildWithoutToken(entry));
}
/**
 * Interactively collect an allowFrom allowlist for one account and write it
 * into the config with dmPolicy "allowlist".
 *
 * Loops prompting for input until one full batch succeeds:
 * - Without a token: every entry must be parseable by `parseId` (ids only);
 *   otherwise `invalidWithoutTokenNote` is shown and the prompt repeats.
 * - With a token: entries are resolved via `resolveEntries`; a thrown/failed
 *   resolution or any unresolved entry re-prompts.
 * Successful ids are merged (deduplicated) with the existing allowlist.
 */
export async function promptLegacyChannelAllowFromForAccount(params: {
cfg: OpenClawConfig;
prompter: WizardPrompter;
accountId?: string;
noteTitle: string;
noteLines: string[];
message: string;
placeholder: string;
parseId: (value: string) => string | null;
invalidWithoutTokenNote: string;
resolveEntries: (params: {
token: string;
entries: string[];
}) => Promise<Array<{ input: string; resolved: boolean; id?: string | null }>>;
resolveToken: (accountId: string) => string | null | undefined;
resolveExisting: (accountId: string, cfg: OpenClawConfig) => Array<string | number>;
}): Promise<OpenClawConfig> {
// Fall back to the default setup account when no accountId was supplied.
const accountId = normalizeAccountId(
params.accountId ?? resolveDefaultDiscordSetupAccountId(params.cfg),
);
await params.prompter.note(params.noteLines.join("\n"), params.noteTitle);
const token = params.resolveToken(accountId);
const existing = params.resolveExisting(accountId, params.cfg);
// Re-prompt until a batch of entries validates/resolves completely.
while (true) {
const entry = await params.prompter.text({
message: params.message,
placeholder: params.placeholder,
initialValue: existing[0] ? String(existing[0]) : undefined,
validate: (value) => (String(value ?? "").trim() ? undefined : "Required"),
});
const parts = splitSetupEntries(String(entry));
if (!token) {
// Token-less path: accept only entries parseId can turn into ids.
const ids = parts.map(params.parseId).filter(Boolean) as string[];
if (ids.length !== parts.length) {
await params.prompter.note(params.invalidWithoutTokenNote, params.noteTitle);
continue;
}
return patchDiscordChannelConfigForAccount({
cfg: params.cfg,
accountId,
patch: {
dmPolicy: "allowlist",
allowFrom: mergeAllowFromEntries(existing, ids),
},
});
}
// Token path: resolver errors are swallowed into a retry, not thrown.
const results = await params.resolveEntries({ token, entries: parts }).catch(() => null);
if (!results) {
await params.prompter.note("Failed to resolve usernames. Try again.", params.noteTitle);
continue;
}
// Any entry that did not resolve to an id forces another round.
const unresolved = results.filter((result) => !result.resolved || !result.id);
if (unresolved.length > 0) {
await params.prompter.note(
`Could not resolve: ${unresolved.map((result) => result.input).join(", ")}`,
params.noteTitle,
);
continue;
}
return patchDiscordChannelConfigForAccount({
cfg: params.cfg,
accountId,
patch: {
dmPolicy: "allowlist",
allowFrom: mergeAllowFromEntries(
existing,
// Safe cast: the unresolved-filter above guarantees id is set.
results.map((result) => result.id as string),
),
},
});
}
}

View File

@ -1,20 +1,26 @@
import {
resolveEntriesWithOptionalToken,
type OpenClawConfig,
promptLegacyChannelAllowFromForAccount,
type WizardPrompter,
} from "openclaw/plugin-sdk/setup";
import { type ChannelSetupWizard } from "openclaw/plugin-sdk/setup";
type ChannelSetupWizard,
} from "openclaw/plugin-sdk/setup-runtime";
import { formatDocsLink } from "openclaw/plugin-sdk/setup-tools";
import { resolveDefaultDiscordAccountId, resolveDiscordAccount } from "./accounts.js";
import { resolveDiscordChannelAllowlist } from "./resolve-channels.js";
import { resolveDiscordUserAllowlist } from "./resolve-users.js";
import {
resolveDefaultDiscordSetupAccountId,
resolveDiscordSetupAccountConfig,
} from "./setup-account-state.js";
import {
createDiscordSetupWizardBase,
DISCORD_TOKEN_HELP_LINES,
parseDiscordAllowFromId,
setDiscordGuildChannelAllowlist,
} from "./setup-core.js";
import {
promptLegacyChannelAllowFromForAccount,
resolveEntriesWithOptionalToken,
} from "./setup-runtime-helpers.js";
import { resolveDiscordToken } from "./token.js";
const channel = "discord" as const;
@ -48,13 +54,8 @@ async function promptDiscordAllowFrom(params: {
}): Promise<OpenClawConfig> {
return await promptLegacyChannelAllowFromForAccount({
cfg: params.cfg,
channel,
prompter: params.prompter,
accountId: params.accountId,
defaultAccountId: resolveDefaultDiscordAccountId(params.cfg),
resolveAccount: (cfg, accountId) => resolveDiscordAccount({ cfg, accountId }),
resolveExisting: (account) => account.config.allowFrom ?? account.config.dm?.allowFrom ?? [],
resolveToken: (account) => account.token,
noteTitle: "Discord allowlist",
noteLines: [
"Allowlist Discord DMs by username (we resolve to user ids).",
@ -69,6 +70,11 @@ async function promptDiscordAllowFrom(params: {
placeholder: "@alice, 123456789012345678",
parseId: parseDiscordAllowFromId,
invalidWithoutTokenNote: "Bot token missing; use numeric user ids (or mention form) only.",
resolveExisting: (accountId, cfg) => {
const account = resolveDiscordSetupAccountConfig({ cfg, accountId }).config;
return account.allowFrom ?? account.dm?.allowFrom ?? [];
},
resolveToken: (accountId) => resolveDiscordToken(params.cfg, { accountId }).token,
resolveEntries: async ({ token, entries }) =>
(
await resolveDiscordUserAllowlist({
@ -91,7 +97,7 @@ async function resolveDiscordGroupAllowlist(params: {
}) {
return await resolveEntriesWithOptionalToken({
token:
resolveDiscordAccount({ cfg: params.cfg, accountId: params.accountId }).token ||
resolveDiscordToken(params.cfg, { accountId: params.accountId }).token ||
(typeof params.credentialValues.token === "string" ? params.credentialValues.token : ""),
entries: params.entries,
buildWithoutToken: (input) => ({
@ -111,7 +117,7 @@ export const discordSetupWizard: ChannelSetupWizard = createDiscordSetupWizardBa
resolveAllowFromEntries: async ({ cfg, accountId, credentialValues, entries }) =>
await resolveDiscordAllowFromEntries({
token:
resolveDiscordAccount({ cfg, accountId }).token ||
resolveDiscordToken(cfg, { accountId }).token ||
(typeof credentialValues.token === "string" ? credentialValues.token : ""),
entries,
}),

View File

@ -544,6 +544,15 @@ function registerEventHandlers(
}),
},
};
const syntheticMessageId = syntheticEvent.message.message_id;
if (await hasProcessedFeishuMessage(syntheticMessageId, accountId, log)) {
log(`feishu[${accountId}]: dropping duplicate bot-menu event for ${syntheticMessageId}`);
return;
}
if (!tryBeginFeishuMessageProcessing(syntheticMessageId, accountId)) {
log(`feishu[${accountId}]: dropping in-flight bot-menu event for ${syntheticMessageId}`);
return;
}
const handleLegacyMenu = () =>
handleFeishuMessage({
cfg,
@ -553,6 +562,7 @@ function registerEventHandlers(
runtime,
chatHistories,
accountId,
processingClaimHeld: true,
});
const promise = maybeHandleFeishuQuickActionMenu({
@ -561,12 +571,19 @@ function registerEventHandlers(
operatorOpenId,
runtime,
accountId,
}).then((handledMenu) => {
if (handledMenu) {
return;
}
return handleLegacyMenu();
});
})
.then(async (handledMenu) => {
if (handledMenu) {
await recordProcessedFeishuMessage(syntheticMessageId, accountId, log);
releaseFeishuMessageProcessing(syntheticMessageId, accountId);
return;
}
return await handleLegacyMenu();
})
.catch((err) => {
releaseFeishuMessageProcessing(syntheticMessageId, accountId);
throw err;
});
if (fireAndForget) {
promise.catch((err) => {
error(`feishu[${accountId}]: error handling bot menu event: ${String(err)}`);

View File

@ -0,0 +1,381 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createPluginRuntimeMock } from "../../../test/helpers/extensions/plugin-runtime-mock.js";
import type { ClawdbotConfig, PluginRuntime, RuntimeEnv } from "../runtime-api.js";
import { monitorSingleAccount } from "./monitor.account.js";
import { setFeishuRuntime } from "./runtime.js";
import type { ResolvedFeishuAccount } from "./types.js";
// Hoisted so the vi.mock factories below (which Vitest hoists above imports)
// can safely reference these spies.
const createEventDispatcherMock = vi.hoisted(() => vi.fn());
const monitorWebSocketMock = vi.hoisted(() => vi.fn(async () => {}));
const monitorWebhookMock = vi.hoisted(() => vi.fn(async () => {}));
const createFeishuThreadBindingManagerMock = vi.hoisted(() => vi.fn(() => ({ stop: vi.fn() })));
const resolveBoundConversationMock = vi.hoisted(() => vi.fn(() => null));
const touchBindingMock = vi.hoisted(() => vi.fn());
const resolveAgentRouteMock = vi.hoisted(() => vi.fn());
const resolveConfiguredBindingRouteMock = vi.hoisted(() => vi.fn());
const ensureConfiguredBindingRouteReadyMock = vi.hoisted(() => vi.fn());
const dispatchReplyFromConfigMock = vi.hoisted(() => vi.fn());
const withReplyDispatcherMock = vi.hoisted(() => vi.fn());
const finalizeInboundContextMock = vi.hoisted(() => vi.fn((ctx) => ctx));
const sendMessageFeishuMock = vi.hoisted(() =>
  vi.fn(async () => ({ messageId: "om_notice", chatId: "oc_group_topic" })),
);
const getMessageFeishuMock = vi.hoisted(() => vi.fn(async () => null));
const listFeishuThreadMessagesMock = vi.hoisted(() => vi.fn(async () => []));
// Handler map captured when the mocked event dispatcher's register() is called.
let handlers: Record<string, (data: unknown) => Promise<void>> = {};
// Runtime passed to the most recent monitorSingleAccount() call, for assertions.
let lastRuntime: RuntimeEnv | null = null;
// Saved so afterEach can restore the process-wide state directory.
const originalStateDir = process.env.OPENCLAW_STATE_DIR;
// Keep the real client module but intercept dispatcher creation so tests can
// capture the registered event handlers.
vi.mock("./client.js", async () => {
  const actual = await vi.importActual<typeof import("./client.js")>("./client.js");
  return {
    ...actual,
    createEventDispatcher: createEventDispatcherMock,
  };
});
vi.mock("./monitor.transport.js", () => ({
  monitorWebSocket: monitorWebSocketMock,
  monitorWebhook: monitorWebhookMock,
}));
vi.mock("./thread-bindings.js", () => ({
  createFeishuThreadBindingManager: createFeishuThreadBindingManagerMock,
}));
vi.mock("./send.js", () => ({
  sendMessageFeishu: sendMessageFeishuMock,
  getMessageFeishu: getMessageFeishuMock,
  listFeishuThreadMessages: listFeishuThreadMessagesMock,
}));
// Override only the binding-route helpers; the rest of the SDK module stays real.
vi.mock("openclaw/plugin-sdk/conversation-runtime", async (importOriginal) => {
  const actual = await importOriginal<typeof import("openclaw/plugin-sdk/conversation-runtime")>();
  return {
    ...actual,
    resolveConfiguredBindingRoute: (params: unknown) => resolveConfiguredBindingRouteMock(params),
    ensureConfiguredBindingRouteReady: (params: unknown) =>
      ensureConfiguredBindingRouteReadyMock(params),
    getSessionBindingService: () => ({
      resolveByConversation: resolveBoundConversationMock,
      touch: touchBindingMock,
    }),
  };
});
vi.mock("../../../src/infra/outbound/session-binding-service.js", () => ({
  getSessionBindingService: () => ({
    resolveByConversation: resolveBoundConversationMock,
    touch: touchBindingMock,
  }),
}));
/**
 * Builds the minimal ClawdbotConfig used by these lifecycle tests: a single
 * websocket Feishu account ("acct-acp") with an open group policy, one topic
 * group with thread replies enabled, and inbound debouncing disabled.
 */
function createLifecycleConfig(): ClawdbotConfig {
  const acpAccount = {
    enabled: true,
    appId: "cli_test",
    appSecret: "secret_test", // pragma: allowlist secret
    connectionMode: "websocket",
    groupPolicy: "open",
    requireMention: false,
    resolveSenderNames: false,
    groups: {
      oc_group_topic: {
        requireMention: false,
        groupSessionScope: "group_topic",
        replyInThread: "enabled",
      },
    },
  };
  const feishuChannel = {
    enabled: true,
    groupPolicy: "open",
    requireMention: false,
    resolveSenderNames: false,
    allowFrom: ["ou_sender_1"],
    accounts: { "acct-acp": acpAccount },
  };
  return {
    session: { mainKey: "main", scope: "per-sender" },
    channels: { feishu: feishuChannel },
    messages: {
      inbound: { debounceMs: 0, byChannel: { feishu: 0 } },
    },
  } as ClawdbotConfig;
}
/**
 * Resolved Feishu account fixture mirroring createLifecycleConfig()'s
 * "acct-acp" entry, as monitorSingleAccount expects it after resolution.
 */
function createLifecycleAccount(): ResolvedFeishuAccount {
  const accountConfig = {
    enabled: true,
    connectionMode: "websocket",
    groupPolicy: "open",
    requireMention: false,
    resolveSenderNames: false,
    groups: {
      oc_group_topic: {
        requireMention: false,
        groupSessionScope: "group_topic",
        replyInThread: "enabled",
      },
    },
    allowFrom: ["ou_sender_1"],
  };
  return {
    accountId: "acct-acp",
    selectionSource: "explicit",
    enabled: true,
    configured: true,
    appId: "cli_test",
    appSecret: "secret_test", // pragma: allowlist secret
    domain: "feishu",
    config: accountConfig,
  } as unknown as ResolvedFeishuAccount;
}
/** Fresh RuntimeEnv whose log/error/exit are vi.fn() spies for assertions. */
function createRuntimeEnv(): RuntimeEnv {
  const env = {
    log: vi.fn(),
    error: vi.fn(),
    exit: vi.fn(),
  };
  return env as RuntimeEnv;
}
/**
 * Builds an im.message.receive_v1 payload for a text message posted inside a
 * group topic (thread) by the allow-listed sender "ou_sender_1".
 */
function createTopicEvent(messageId: string) {
  const content = JSON.stringify({ text: "hello topic" });
  const sender = {
    sender_id: { open_id: "ou_sender_1" },
    sender_type: "user",
  };
  return {
    sender,
    message: {
      message_id: messageId,
      root_id: "om_topic_root_1",
      thread_id: "omt_topic_1",
      chat_id: "oc_group_topic",
      chat_type: "group" as const,
      message_type: "text",
      content,
      create_time: "1710000000000",
    },
  };
}
/**
 * Drains queued microtasks and zero-delay timers several times so that
 * fire-and-forget promise chains started by the handler can finish before
 * the test asserts on mock calls.
 */
async function settleAsyncWork(): Promise<void> {
  let remaining = 6;
  while (remaining > 0) {
    remaining -= 1;
    await Promise.resolve();
    await new Promise<void>((resolve) => {
      setTimeout(resolve, 0);
    });
  }
}
/**
 * Boots monitorSingleAccount with the test fixtures, captures the handler map
 * registered on the mocked event dispatcher, and returns the
 * im.message.receive_v1 handler under test.
 */
async function setupLifecycleMonitor() {
  const register = vi.fn((registered: Record<string, (data: unknown) => Promise<void>>) => {
    // Capture the registered handlers in module state for the tests to call.
    handlers = registered;
  });
  createEventDispatcherMock.mockReturnValue({ register });
  lastRuntime = createRuntimeEnv();
  await monitorSingleAccount({
    cfg: createLifecycleConfig(),
    account: createLifecycleAccount(),
    runtime: lastRuntime,
    // Prefetched bot identity avoids any identity lookup during the test.
    botOpenIdSource: {
      kind: "prefetched",
      botOpenId: "ou_bot_1",
      botName: "Bot",
    },
  });
  const onMessage = handlers["im.message.receive_v1"];
  if (!onMessage) {
    throw new Error("missing im.message.receive_v1 handler");
  }
  return onMessage;
}
// Lifecycle coverage: when a configured ACP binding fails to become ready, the
// monitor should notify the topic root exactly once — even if the same inbound
// event is replayed (e.g. webhook redelivery) — and never fall through to the
// normal reply-dispatch path.
describe("Feishu ACP-init failure lifecycle", () => {
  beforeEach(() => {
    vi.clearAllMocks();
    handlers = {};
    lastRuntime = null;
    // Unique state dir per test so processed-message dedupe state never leaks
    // between tests or runs.
    process.env.OPENCLAW_STATE_DIR = `/tmp/openclaw-feishu-acp-failure-${Date.now()}-${Math.random().toString(36).slice(2)}`;
    resolveBoundConversationMock.mockReturnValue(null);
    resolveAgentRouteMock.mockReturnValue({
      agentId: "main",
      channel: "feishu",
      accountId: "acct-acp",
      sessionKey: "agent:main:feishu:group:oc_group_topic",
      mainSessionKey: "agent:main:main",
      matchedBy: "default",
    });
    // A configured ACP binding resolves for the topic conversation...
    resolveConfiguredBindingRouteMock.mockReturnValue({
      bindingResolution: {
        configuredBinding: {
          spec: {
            channel: "feishu",
            accountId: "acct-acp",
            conversationId: "oc_group_topic:topic:om_topic_root_1",
            agentId: "codex",
            mode: "persistent",
          },
          record: {
            bindingId: "config:acp:feishu:acct-acp:oc_group_topic:topic:om_topic_root_1",
            targetSessionKey: "agent:codex:acp:binding:feishu:acct-acp:abc123",
            targetKind: "session",
            conversation: {
              channel: "feishu",
              accountId: "acct-acp",
              conversationId: "oc_group_topic:topic:om_topic_root_1",
              parentConversationId: "oc_group_topic",
            },
            status: "active",
            boundAt: 0,
            metadata: { source: "config" },
          },
        },
        statefulTarget: {
          kind: "stateful",
          driverId: "acp",
          sessionKey: "agent:codex:acp:binding:feishu:acct-acp:abc123",
          agentId: "codex",
        },
      },
      configuredBinding: {
        spec: {
          channel: "feishu",
          accountId: "acct-acp",
          conversationId: "oc_group_topic:topic:om_topic_root_1",
          agentId: "codex",
          mode: "persistent",
        },
      },
      route: {
        agentId: "codex",
        channel: "feishu",
        accountId: "acct-acp",
        sessionKey: "agent:codex:acp:binding:feishu:acct-acp:abc123",
        mainSessionKey: "agent:codex:main",
        matchedBy: "binding.channel",
      },
    });
    // ...but readying the route fails; this error text must surface in the notice.
    ensureConfiguredBindingRouteReadyMock.mockResolvedValue({
      ok: false,
      error: "runtime unavailable",
    });
    dispatchReplyFromConfigMock.mockResolvedValue({
      queuedFinal: false,
      counts: { final: 0 },
    });
    withReplyDispatcherMock.mockImplementation(async ({ run }) => await run());
    // Wire a plugin-runtime mock with a pass-through debouncer so inbound
    // events flush synchronously inside enqueue().
    setFeishuRuntime(
      createPluginRuntimeMock({
        channel: {
          debounce: {
            resolveInboundDebounceMs: vi.fn(() => 0),
            createInboundDebouncer: <T>(params: {
              onFlush?: (items: T[]) => Promise<void>;
              onError?: (err: unknown, items: T[]) => void;
            }) => ({
              enqueue: async (item: T) => {
                try {
                  await params.onFlush?.([item]);
                } catch (err) {
                  params.onError?.(err, [item]);
                }
              },
              flushKey: async () => {},
            }),
          },
          text: {
            hasControlCommand: vi.fn(() => false),
          },
          routing: {
            resolveAgentRoute:
              resolveAgentRouteMock as unknown as PluginRuntime["channel"]["routing"]["resolveAgentRoute"],
          },
          reply: {
            resolveEnvelopeFormatOptions: vi.fn(() => ({})),
            formatAgentEnvelope: vi.fn((params: { body: string }) => params.body),
            finalizeInboundContext:
              finalizeInboundContextMock as unknown as PluginRuntime["channel"]["reply"]["finalizeInboundContext"],
            dispatchReplyFromConfig:
              dispatchReplyFromConfigMock as unknown as PluginRuntime["channel"]["reply"]["dispatchReplyFromConfig"],
            withReplyDispatcher:
              withReplyDispatcherMock as unknown as PluginRuntime["channel"]["reply"]["withReplyDispatcher"],
          },
          commands: {
            shouldComputeCommandAuthorized: vi.fn(() => false),
            resolveCommandAuthorizedFromAuthorizers: vi.fn(() => false),
          },
          session: {
            readSessionUpdatedAt: vi.fn(),
            resolveStorePath: vi.fn(() => "/tmp/feishu-acp-failure-sessions.json"),
          },
          pairing: {
            readAllowFromStore: vi.fn().mockResolvedValue([]),
            upsertPairingRequest: vi.fn(),
            buildPairingReply: vi.fn(),
          },
        },
        media: {
          detectMime: vi.fn(async () => "text/plain"),
        },
      }) as unknown as PluginRuntime,
    );
  });
  afterEach(() => {
    // Restore the process-wide state dir exactly as it was before the suite.
    if (originalStateDir === undefined) {
      delete process.env.OPENCLAW_STATE_DIR;
      return;
    }
    process.env.OPENCLAW_STATE_DIR = originalStateDir;
  });
  it("sends one ACP failure notice to the topic root across replay", async () => {
    const onMessage = await setupLifecycleMonitor();
    const event = createTopicEvent("om_topic_msg_1");
    // Deliver the same event twice to simulate webhook redelivery.
    await onMessage(event);
    await settleAsyncWork();
    await onMessage(event);
    await settleAsyncWork();
    expect(lastRuntime?.error).not.toHaveBeenCalled();
    expect(resolveConfiguredBindingRouteMock).toHaveBeenCalledTimes(1);
    expect(ensureConfiguredBindingRouteReadyMock).toHaveBeenCalledTimes(1);
    expect(sendMessageFeishuMock).toHaveBeenCalledTimes(1);
    // The single notice targets the topic root in-thread and carries the error.
    expect(sendMessageFeishuMock).toHaveBeenCalledWith(
      expect.objectContaining({
        accountId: "acct-acp",
        to: "chat:oc_group_topic",
        replyToMessageId: "om_topic_root_1",
        replyInThread: true,
        text: expect.stringContaining("runtime unavailable"),
      }),
    );
    expect(dispatchReplyFromConfigMock).not.toHaveBeenCalled();
  });
  it("does not duplicate the ACP failure notice after the first send succeeds", async () => {
    const onMessage = await setupLifecycleMonitor();
    const event = createTopicEvent("om_topic_msg_2");
    await onMessage(event);
    await settleAsyncWork();
    await onMessage(event);
    await settleAsyncWork();
    expect(sendMessageFeishuMock).toHaveBeenCalledTimes(1);
    expect(lastRuntime?.error).not.toHaveBeenCalled();
  });
});

View File

@ -0,0 +1,364 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createPluginRuntimeMock } from "../../../test/helpers/extensions/plugin-runtime-mock.js";
import type { ClawdbotConfig, PluginRuntime, RuntimeEnv } from "../runtime-api.js";
import { monitorSingleAccount } from "./monitor.account.js";
import { setFeishuRuntime } from "./runtime.js";
import type { ResolvedFeishuAccount } from "./types.js";
// Shape returned by the mocked session-binding service for a bound conversation.
type BoundConversation = {
  bindingId: string;
  targetSessionKey: string;
};
// Hoisted so the vi.mock factories below (which Vitest hoists above imports)
// can safely reference these spies.
const createEventDispatcherMock = vi.hoisted(() => vi.fn());
const monitorWebSocketMock = vi.hoisted(() => vi.fn(async () => {}));
const monitorWebhookMock = vi.hoisted(() => vi.fn(async () => {}));
const createFeishuThreadBindingManagerMock = vi.hoisted(() => vi.fn(() => ({ stop: vi.fn() })));
const createFeishuReplyDispatcherMock = vi.hoisted(() => vi.fn());
const resolveBoundConversationMock = vi.hoisted(() =>
  vi.fn<() => BoundConversation | null>(() => null),
);
const touchBindingMock = vi.hoisted(() => vi.fn());
const resolveAgentRouteMock = vi.hoisted(() => vi.fn());
const dispatchReplyFromConfigMock = vi.hoisted(() => vi.fn());
const withReplyDispatcherMock = vi.hoisted(() => vi.fn());
const finalizeInboundContextMock = vi.hoisted(() => vi.fn((ctx) => ctx));
const sendCardFeishuMock = vi.hoisted(() =>
  vi.fn(async () => ({ messageId: "om_card_sent", chatId: "p2p:ou_user1" })),
);
const getMessageFeishuMock = vi.hoisted(() => vi.fn(async () => null));
const listFeishuThreadMessagesMock = vi.hoisted(() => vi.fn(async () => []));
const sendMessageFeishuMock = vi.hoisted(() =>
  vi.fn(async () => ({ messageId: "om_sent", chatId: "p2p:ou_user1" })),
);
// Handler map captured when the mocked event dispatcher's register() is called.
let handlers: Record<string, (data: unknown) => Promise<void>> = {};
// Runtime passed to the most recent monitorSingleAccount() call, for assertions.
let lastRuntime: RuntimeEnv | null = null;
// Saved so afterEach can restore the process-wide state directory.
const originalStateDir = process.env.OPENCLAW_STATE_DIR;
// Keep the real client module but intercept dispatcher creation so tests can
// capture the registered event handlers.
vi.mock("./client.js", async () => {
  const actual = await vi.importActual<typeof import("./client.js")>("./client.js");
  return {
    ...actual,
    createEventDispatcher: createEventDispatcherMock,
  };
});
vi.mock("./monitor.transport.js", () => ({
  monitorWebSocket: monitorWebSocketMock,
  monitorWebhook: monitorWebhookMock,
}));
vi.mock("./thread-bindings.js", () => ({
  createFeishuThreadBindingManager: createFeishuThreadBindingManagerMock,
}));
vi.mock("./reply-dispatcher.js", () => ({
  createFeishuReplyDispatcher: createFeishuReplyDispatcherMock,
}));
vi.mock("./send.js", () => ({
  sendCardFeishu: sendCardFeishuMock,
  getMessageFeishu: getMessageFeishuMock,
  listFeishuThreadMessages: listFeishuThreadMessagesMock,
  sendMessageFeishu: sendMessageFeishuMock,
}));
// Only the session-binding service is stubbed; the rest of the module is real.
vi.mock("openclaw/plugin-sdk/conversation-runtime", async (importOriginal) => {
  const actual = await importOriginal<typeof import("openclaw/plugin-sdk/conversation-runtime")>();
  return {
    ...actual,
    getSessionBindingService: () => ({
      resolveByConversation: resolveBoundConversationMock,
      touch: touchBindingMock,
    }),
  };
});
vi.mock("../../../src/infra/outbound/session-binding-service.js", () => ({
  getSessionBindingService: () => ({
    resolveByConversation: resolveBoundConversationMock,
    touch: touchBindingMock,
  }),
}));
/**
 * Minimal ClawdbotConfig for the bot-menu lifecycle tests: one websocket
 * Feishu account ("acct-menu") with an open DM policy and inbound debouncing
 * disabled.
 */
function createLifecycleConfig(): ClawdbotConfig {
  const menuAccount = {
    enabled: true,
    appId: "cli_test",
    appSecret: "secret_test", // pragma: allowlist secret
    connectionMode: "websocket",
    dmPolicy: "open",
    requireMention: false,
    resolveSenderNames: false,
  };
  return {
    channels: {
      feishu: {
        enabled: true,
        dmPolicy: "open",
        requireMention: false,
        resolveSenderNames: false,
        accounts: { "acct-menu": menuAccount },
      },
    },
    messages: {
      inbound: { debounceMs: 0, byChannel: { feishu: 0 } },
    },
  } as ClawdbotConfig;
}
/**
 * Resolved account fixture mirroring createLifecycleConfig()'s "acct-menu"
 * entry, as monitorSingleAccount expects it after resolution.
 */
function createLifecycleAccount(): ResolvedFeishuAccount {
  const accountConfig = {
    enabled: true,
    connectionMode: "websocket",
    dmPolicy: "open",
    requireMention: false,
    resolveSenderNames: false,
  };
  return {
    accountId: "acct-menu",
    selectionSource: "explicit",
    enabled: true,
    configured: true,
    appId: "cli_test",
    appSecret: "secret_test", // pragma: allowlist secret
    domain: "feishu",
    config: accountConfig,
  } as unknown as ResolvedFeishuAccount;
}
/** Fresh RuntimeEnv whose log/error/exit are vi.fn() spies for assertions. */
function createRuntimeEnv(): RuntimeEnv {
  const env = {
    log: vi.fn(),
    error: vi.fn(),
    exit: vi.fn(),
  };
  return env as RuntimeEnv;
}
/**
 * Builds an application.bot.menu_v6 payload for the given menu key and event
 * timestamp, always operated by test user "ou_user1".
 */
function createBotMenuEvent(params: { eventKey: string; timestamp: string }) {
  const operator = {
    operator_id: {
      open_id: "ou_user1",
      user_id: "user_1",
      union_id: "union_1",
    },
  };
  return {
    event_key: params.eventKey,
    timestamp: params.timestamp,
    operator,
  };
}
/**
 * Drains queued microtasks and zero-delay timers several times so that
 * fire-and-forget promise chains started by the handler can finish before
 * the test asserts on mock calls.
 */
async function settleAsyncWork(): Promise<void> {
  let remaining = 6;
  while (remaining > 0) {
    remaining -= 1;
    await Promise.resolve();
    await new Promise<void>((resolve) => {
      setTimeout(resolve, 0);
    });
  }
}
/**
 * Boots monitorSingleAccount with the bot-menu fixtures, captures the handler
 * map registered on the mocked event dispatcher, and returns the
 * application.bot.menu_v6 handler under test.
 */
async function setupLifecycleMonitor() {
  const register = vi.fn((registered: Record<string, (data: unknown) => Promise<void>>) => {
    // Capture the registered handlers in module state for the tests to call.
    handlers = registered;
  });
  createEventDispatcherMock.mockReturnValue({ register });
  lastRuntime = createRuntimeEnv();
  await monitorSingleAccount({
    cfg: createLifecycleConfig(),
    account: createLifecycleAccount(),
    runtime: lastRuntime,
    // Prefetched bot identity avoids any identity lookup during the test.
    botOpenIdSource: {
      kind: "prefetched",
      botOpenId: "ou_bot_1",
      botName: "Bot",
    },
  });
  const onBotMenu = handlers["application.bot.menu_v6"];
  if (!onBotMenu) {
    throw new Error("missing application.bot.menu_v6 handler");
  }
  return onBotMenu;
}
// Lifecycle coverage: the "quick-actions" bot-menu key opens a launcher card
// exactly once across duplicate event replay, and falls back to the legacy
// routed reply path (once) only when sending the card fails.
describe("Feishu bot-menu lifecycle", () => {
  beforeEach(() => {
    vi.clearAllMocks();
    handlers = {};
    lastRuntime = null;
    // Unique state dir per test so dedupe state never leaks between tests.
    process.env.OPENCLAW_STATE_DIR = `/tmp/openclaw-feishu-bot-menu-${Date.now()}-${Math.random().toString(36).slice(2)}`;
    // Reply dispatcher stub used by the legacy fallback path.
    const dispatcher = {
      sendToolResult: vi.fn(() => false),
      sendBlockReply: vi.fn(() => false),
      sendFinalReply: vi.fn(async () => true),
      waitForIdle: vi.fn(async () => {}),
      getQueuedCounts: vi.fn(() => ({ tool: 0, block: 0, final: 0 })),
      markComplete: vi.fn(),
    };
    createFeishuReplyDispatcherMock.mockReturnValue({
      dispatcher,
      replyOptions: {},
      markDispatchIdle: vi.fn(),
    });
    // The operator's DM conversation resolves to an existing session binding,
    // so the fallback should route to the bound agent's session.
    resolveBoundConversationMock.mockImplementation(() => ({
      bindingId: "binding-menu",
      targetSessionKey: "agent:bound-agent:feishu:direct:ou_user1",
    }));
    resolveAgentRouteMock.mockReturnValue({
      agentId: "main",
      channel: "feishu",
      accountId: "acct-menu",
      sessionKey: "agent:main:feishu:direct:ou_user1",
      mainSessionKey: "agent:main:main",
      matchedBy: "default",
    });
    dispatchReplyFromConfigMock.mockImplementation(async ({ dispatcher }) => {
      await dispatcher.sendFinalReply({ text: "menu reply once" });
      return {
        queuedFinal: false,
        counts: { final: 1 },
      };
    });
    withReplyDispatcherMock.mockImplementation(async ({ run }) => await run());
    // Wire a plugin-runtime mock with a pass-through debouncer so inbound
    // events flush synchronously inside enqueue().
    setFeishuRuntime(
      createPluginRuntimeMock({
        channel: {
          debounce: {
            resolveInboundDebounceMs: vi.fn(() => 0),
            createInboundDebouncer: <T>(params: {
              onFlush?: (items: T[]) => Promise<void>;
              onError?: (err: unknown, items: T[]) => void;
            }) => ({
              enqueue: async (item: T) => {
                try {
                  await params.onFlush?.([item]);
                } catch (err) {
                  params.onError?.(err, [item]);
                }
              },
              flushKey: async () => {},
            }),
          },
          text: {
            hasControlCommand: vi.fn(() => false),
          },
          routing: {
            resolveAgentRoute:
              resolveAgentRouteMock as unknown as PluginRuntime["channel"]["routing"]["resolveAgentRoute"],
          },
          reply: {
            resolveEnvelopeFormatOptions: vi.fn(() => ({})),
            formatAgentEnvelope: vi.fn((params: { body: string }) => params.body),
            finalizeInboundContext:
              finalizeInboundContextMock as unknown as PluginRuntime["channel"]["reply"]["finalizeInboundContext"],
            dispatchReplyFromConfig:
              dispatchReplyFromConfigMock as unknown as PluginRuntime["channel"]["reply"]["dispatchReplyFromConfig"],
            withReplyDispatcher:
              withReplyDispatcherMock as unknown as PluginRuntime["channel"]["reply"]["withReplyDispatcher"],
          },
          commands: {
            shouldComputeCommandAuthorized: vi.fn(() => false),
            resolveCommandAuthorizedFromAuthorizers: vi.fn(() => false),
          },
          session: {
            readSessionUpdatedAt: vi.fn(),
            resolveStorePath: vi.fn(() => "/tmp/feishu-bot-menu-sessions.json"),
          },
          pairing: {
            readAllowFromStore: vi.fn().mockResolvedValue([]),
            upsertPairingRequest: vi.fn(),
            buildPairingReply: vi.fn(),
          },
        },
        media: {
          detectMime: vi.fn(async () => "text/plain"),
        },
      }) as unknown as PluginRuntime,
    );
  });
  afterEach(() => {
    // Restore the process-wide state dir exactly as it was before the suite.
    if (originalStateDir === undefined) {
      delete process.env.OPENCLAW_STATE_DIR;
      return;
    }
    process.env.OPENCLAW_STATE_DIR = originalStateDir;
  });
  it("opens one launcher card across duplicate quick-actions replay", async () => {
    const onBotMenu = await setupLifecycleMonitor();
    const event = createBotMenuEvent({
      eventKey: "quick-actions",
      timestamp: "1700000000000",
    });
    // Deliver the same event twice to simulate redelivery.
    await onBotMenu(event);
    await settleAsyncWork();
    await onBotMenu(event);
    await settleAsyncWork();
    expect(lastRuntime?.error).not.toHaveBeenCalled();
    expect(sendCardFeishuMock).toHaveBeenCalledTimes(1);
    expect(sendCardFeishuMock).toHaveBeenCalledWith(
      expect.objectContaining({
        accountId: "acct-menu",
        to: "user:ou_user1",
      }),
    );
    // Successful card send must not trigger the legacy reply path.
    expect(dispatchReplyFromConfigMock).not.toHaveBeenCalled();
    expect(createFeishuReplyDispatcherMock).not.toHaveBeenCalled();
  });
  it("falls back once to the legacy routed reply path when launcher rendering fails", async () => {
    const onBotMenu = await setupLifecycleMonitor();
    const event = createBotMenuEvent({
      eventKey: "quick-actions",
      timestamp: "1700000000001",
    });
    sendCardFeishuMock.mockRejectedValueOnce(new Error("boom"));
    await onBotMenu(event);
    await settleAsyncWork();
    await onBotMenu(event);
    await settleAsyncWork();
    expect(lastRuntime?.error).not.toHaveBeenCalled();
    expect(sendCardFeishuMock).toHaveBeenCalledTimes(1);
    expect(dispatchReplyFromConfigMock).toHaveBeenCalledTimes(1);
    expect(createFeishuReplyDispatcherMock).toHaveBeenCalledTimes(1);
    // The synthetic message id is derived from the menu key and timestamp.
    expect(createFeishuReplyDispatcherMock).toHaveBeenCalledWith(
      expect.objectContaining({
        accountId: "acct-menu",
        chatId: "p2p:ou_user1",
        replyToMessageId: "bot-menu:quick-actions:1700000000001",
      }),
    );
    // Fallback routes to the bound agent's session from the DM binding.
    expect(finalizeInboundContextMock).toHaveBeenCalledWith(
      expect.objectContaining({
        AccountId: "acct-menu",
        SessionKey: "agent:bound-agent:feishu:direct:ou_user1",
        MessageSid: "bot-menu:quick-actions:1700000000001",
      }),
    );
    expect(touchBindingMock).toHaveBeenCalledWith("binding-menu");
    const dispatcher = createFeishuReplyDispatcherMock.mock.results[0]?.value.dispatcher as {
      sendFinalReply: ReturnType<typeof vi.fn>;
    };
    expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
  });
});

View File

@ -1,4 +1,4 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { hasControlCommand } from "../../../src/auto-reply/command-detection.js";
import {
createInboundDebouncer,
@ -18,6 +18,7 @@ const sendCardFeishuMock = vi.hoisted(() => vi.fn(async () => ({ messageId: "m1"
const createFeishuThreadBindingManagerMock = vi.hoisted(() => vi.fn(() => ({ stop: vi.fn() })));
let handlers: Record<string, (data: unknown) => Promise<void>> = {};
const originalStateDir = process.env.OPENCLAW_STATE_DIR;
vi.mock("./client.js", () => ({
createEventDispatcher: createEventDispatcherMock,
@ -63,6 +64,20 @@ function buildAccount(): ResolvedFeishuAccount {
} as ResolvedFeishuAccount;
}
function createBotMenuEvent(params: { eventKey: string; timestamp: string }) {
return {
event_key: params.eventKey,
timestamp: params.timestamp,
operator: {
operator_id: {
open_id: "ou_user1",
user_id: "user_1",
union_id: "union_1",
},
},
};
}
async function registerHandlers() {
setFeishuRuntime(
createPluginRuntimeMock({
@ -108,22 +123,21 @@ describe("Feishu bot menu handler", () => {
beforeEach(() => {
handlers = {};
vi.clearAllMocks();
process.env.OPENCLAW_STATE_DIR = `/tmp/openclaw-feishu-bot-menu-test-${Date.now()}-${Math.random().toString(36).slice(2)}`;
});
afterEach(() => {
if (originalStateDir === undefined) {
delete process.env.OPENCLAW_STATE_DIR;
return;
}
process.env.OPENCLAW_STATE_DIR = originalStateDir;
});
it("opens the quick-action launcher card at the webhook/event layer", async () => {
const onBotMenu = await registerHandlers();
await onBotMenu({
event_key: "quick-actions",
timestamp: "1700000000000",
operator: {
operator_id: {
open_id: "ou_user1",
user_id: "user_1",
union_id: "union_1",
},
},
});
await onBotMenu(createBotMenuEvent({ eventKey: "quick-actions", timestamp: "1700000000000" }));
expect(sendCardFeishuMock).toHaveBeenCalledWith(
expect.objectContaining({
@ -148,24 +162,17 @@ describe("Feishu bot menu handler", () => {
}),
);
const pending = onBotMenu({
event_key: "quick-actions",
timestamp: "1700000000000",
operator: {
operator_id: {
open_id: "ou_user1",
user_id: "user_1",
union_id: "union_1",
},
},
});
const pending = onBotMenu(
createBotMenuEvent({ eventKey: "quick-actions", timestamp: "1700000000001" }),
);
let settled = false;
pending.finally(() => {
settled = true;
});
await Promise.resolve();
expect(settled).toBe(true);
await vi.waitFor(() => {
expect(settled).toBe(true);
});
resolveSend?.();
await pending;
@ -174,17 +181,7 @@ describe("Feishu bot menu handler", () => {
it("falls back to the legacy /menu synthetic message path for unrelated bot menu keys", async () => {
const onBotMenu = await registerHandlers();
await onBotMenu({
event_key: "custom-key",
timestamp: "1700000000000",
operator: {
operator_id: {
open_id: "ou_user1",
user_id: "user_1",
union_id: "union_1",
},
},
});
await onBotMenu(createBotMenuEvent({ eventKey: "custom-key", timestamp: "1700000000002" }));
expect(handleFeishuMessageMock).toHaveBeenCalledWith(
expect.objectContaining({
@ -202,17 +199,7 @@ describe("Feishu bot menu handler", () => {
const onBotMenu = await registerHandlers();
sendCardFeishuMock.mockRejectedValueOnce(new Error("boom"));
await onBotMenu({
event_key: "quick-actions",
timestamp: "1700000000000",
operator: {
operator_id: {
open_id: "ou_user1",
user_id: "user_1",
union_id: "union_1",
},
},
});
await onBotMenu(createBotMenuEvent({ eventKey: "quick-actions", timestamp: "1700000000003" }));
await vi.waitFor(() => {
expect(handleFeishuMessageMock).toHaveBeenCalledWith(

View File

@ -0,0 +1,399 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createPluginRuntimeMock } from "../../../test/helpers/extensions/plugin-runtime-mock.js";
import type { ClawdbotConfig, PluginRuntime, RuntimeEnv } from "../runtime-api.js";
import { monitorSingleAccount } from "./monitor.account.js";
import { setFeishuRuntime } from "./runtime.js";
import type { ResolvedFeishuAccount } from "./types.js";
// Hoisted so the vi.mock factories below (which Vitest hoists above imports)
// can safely reference these spies.
const createEventDispatcherMock = vi.hoisted(() => vi.fn());
const monitorWebSocketMock = vi.hoisted(() => vi.fn(async () => {}));
const monitorWebhookMock = vi.hoisted(() => vi.fn(async () => {}));
const createFeishuThreadBindingManagerMock = vi.hoisted(() => vi.fn(() => ({ stop: vi.fn() })));
const createFeishuReplyDispatcherMock = vi.hoisted(() => vi.fn());
const resolveBoundConversationMock = vi.hoisted(() =>
  vi.fn<
    () => {
      bindingId: string;
      targetSessionKey: string;
    } | null
  >(() => null),
);
const touchBindingMock = vi.hoisted(() => vi.fn());
const resolveAgentRouteMock = vi.hoisted(() => vi.fn());
const dispatchReplyFromConfigMock = vi.hoisted(() => vi.fn());
const withReplyDispatcherMock = vi.hoisted(() => vi.fn());
const finalizeInboundContextMock = vi.hoisted(() => vi.fn((ctx) => ctx));
const getMessageFeishuMock = vi.hoisted(() => vi.fn(async () => null));
const listFeishuThreadMessagesMock = vi.hoisted(() => vi.fn(async () => []));
const sendMessageFeishuMock = vi.hoisted(() =>
  vi.fn(async () => ({ messageId: "om_sent", chatId: "oc_broadcast_group" })),
);
// Per-account captures: these tests run two monitored accounts side by side.
let handlersByAccount = new Map<string, Record<string, (data: unknown) => Promise<void>>>();
let runtimesByAccount = new Map<string, RuntimeEnv>();
// Saved so afterEach can restore the process-wide state directory.
const originalStateDir = process.env.OPENCLAW_STATE_DIR;
// Keep the real client module but intercept dispatcher creation so tests can
// capture each account's registered event handlers.
vi.mock("./client.js", async () => {
  const actual = await vi.importActual<typeof import("./client.js")>("./client.js");
  return {
    ...actual,
    createEventDispatcher: createEventDispatcherMock,
  };
});
vi.mock("./monitor.transport.js", () => ({
  monitorWebSocket: monitorWebSocketMock,
  monitorWebhook: monitorWebhookMock,
}));
vi.mock("./thread-bindings.js", () => ({
  createFeishuThreadBindingManager: createFeishuThreadBindingManagerMock,
}));
vi.mock("./reply-dispatcher.js", () => ({
  createFeishuReplyDispatcher: createFeishuReplyDispatcherMock,
}));
vi.mock("./send.js", () => ({
  getMessageFeishu: getMessageFeishuMock,
  listFeishuThreadMessages: listFeishuThreadMessagesMock,
  sendMessageFeishu: sendMessageFeishuMock,
}));
// Only the session-binding service is stubbed; the rest of the module is real.
vi.mock("openclaw/plugin-sdk/conversation-runtime", async (importOriginal) => {
  const actual = await importOriginal<typeof import("openclaw/plugin-sdk/conversation-runtime")>();
  return {
    ...actual,
    getSessionBindingService: () => ({
      resolveByConversation: resolveBoundConversationMock,
      touch: touchBindingMock,
    }),
  };
});
vi.mock("../../../src/infra/outbound/session-binding-service.js", () => ({
  getSessionBindingService: () => ({
    resolveByConversation: resolveBoundConversationMock,
    touch: touchBindingMock,
  }),
}));
/**
 * Config for the broadcast tests: two websocket Feishu accounts (account-A and
 * account-B) that both see oc_broadcast_group, which broadcasts to the agents
 * "susan" and "main". Inbound debouncing is disabled.
 */
function createLifecycleConfig(): ClawdbotConfig {
  const makeAccount = (appId: string, appSecret: string) => ({
    enabled: true,
    appId,
    appSecret,
    connectionMode: "websocket",
    groupPolicy: "open",
    requireMention: false,
    resolveSenderNames: false,
    groups: {
      oc_broadcast_group: {
        requireMention: false,
      },
    },
  });
  return {
    broadcast: {
      oc_broadcast_group: ["susan", "main"],
    },
    agents: {
      list: [{ id: "main" }, { id: "susan" }],
    },
    channels: {
      feishu: {
        enabled: true,
        groupPolicy: "open",
        requireMention: false,
        resolveSenderNames: false,
        accounts: {
          "account-A": makeAccount("cli_a", "secret_a"), // pragma: allowlist secret
          "account-B": makeAccount("cli_b", "secret_b"), // pragma: allowlist secret
        },
      },
    },
    messages: {
      inbound: { debounceMs: 0, byChannel: { feishu: 0 } },
    },
  } as ClawdbotConfig;
}
/**
 * Resolved fixture for one of the two broadcast accounts, mirroring the
 * matching entry in createLifecycleConfig().
 */
function createLifecycleAccount(accountId: "account-A" | "account-B"): ResolvedFeishuAccount {
  const isAccountA = accountId === "account-A";
  return {
    accountId,
    selectionSource: "explicit",
    enabled: true,
    configured: true,
    appId: isAccountA ? "cli_a" : "cli_b",
    appSecret: isAccountA ? "secret_a" : "secret_b", // pragma: allowlist secret
    domain: "feishu",
    config: {
      enabled: true,
      connectionMode: "websocket",
      groupPolicy: "open",
      requireMention: false,
      resolveSenderNames: false,
      groups: {
        oc_broadcast_group: {
          requireMention: false,
        },
      },
    },
  } as unknown as ResolvedFeishuAccount;
}
/** Fresh RuntimeEnv whose log/error/exit are vi.fn() spies for assertions. */
function createRuntimeEnv(): RuntimeEnv {
  const env = {
    log: vi.fn(),
    error: vi.fn(),
    exit: vi.fn(),
  };
  return env as RuntimeEnv;
}
/**
 * Builds an im.message.receive_v1 payload for a plain text group message in
 * the broadcast group, sent by test user "ou_sender_1".
 */
function createBroadcastEvent(messageId: string) {
  const content = JSON.stringify({ text: "hello broadcast" });
  const sender = {
    sender_id: { open_id: "ou_sender_1" },
    sender_type: "user",
  };
  return {
    sender,
    message: {
      message_id: messageId,
      chat_id: "oc_broadcast_group",
      chat_type: "group" as const,
      message_type: "text",
      content,
      create_time: "1710000000000",
    },
  };
}
/**
 * Drains queued microtasks and zero-delay timers several times so that
 * fire-and-forget promise chains started by the handler can finish before
 * the test asserts on mock calls.
 */
async function settleAsyncWork(): Promise<void> {
  let remaining = 6;
  while (remaining > 0) {
    remaining -= 1;
    await Promise.resolve();
    await new Promise<void>((resolve) => {
      setTimeout(resolve, 0);
    });
  }
}
/**
 * Boots monitorSingleAccount for one broadcast account, storing its registered
 * handlers and RuntimeEnv keyed by accountId, and returns that account's
 * im.message.receive_v1 handler.
 */
async function setupLifecycleMonitor(accountId: "account-A" | "account-B") {
  const register = vi.fn((registered: Record<string, (data: unknown) => Promise<void>>) => {
    handlersByAccount.set(accountId, registered);
  });
  // mockReturnValueOnce: each monitored account consumes exactly one dispatcher,
  // so back-to-back setups get distinct register() captures.
  createEventDispatcherMock.mockReturnValueOnce({ register });
  const runtime = createRuntimeEnv();
  runtimesByAccount.set(accountId, runtime);
  await monitorSingleAccount({
    cfg: createLifecycleConfig(),
    account: createLifecycleAccount(accountId),
    runtime,
    // Prefetched bot identity avoids any identity lookup during the test.
    botOpenIdSource: {
      kind: "prefetched",
      botOpenId: "ou_bot_1",
      botName: "Bot",
    },
  });
  const onMessage = handlersByAccount.get(accountId)?.["im.message.receive_v1"];
  if (!onMessage) {
    throw new Error(`missing im.message.receive_v1 handler for ${accountId}`);
  }
  return onMessage;
}
describe("Feishu broadcast reply-once lifecycle", () => {
beforeEach(() => {
vi.clearAllMocks();
handlersByAccount = new Map();
runtimesByAccount = new Map();
process.env.OPENCLAW_STATE_DIR = `/tmp/openclaw-feishu-broadcast-${Date.now()}-${Math.random().toString(36).slice(2)}`;
const activeDispatcher = {
sendToolResult: vi.fn(() => false),
sendBlockReply: vi.fn(() => false),
sendFinalReply: vi.fn(async () => true),
waitForIdle: vi.fn(async () => {}),
getQueuedCounts: vi.fn(() => ({ tool: 0, block: 0, final: 0 })),
markComplete: vi.fn(),
};
createFeishuReplyDispatcherMock.mockReturnValue({
dispatcher: activeDispatcher,
replyOptions: {},
markDispatchIdle: vi.fn(),
});
resolveBoundConversationMock.mockReturnValue(null);
resolveAgentRouteMock.mockReturnValue({
agentId: "main",
channel: "feishu",
accountId: "account-A",
sessionKey: "agent:main:feishu:group:oc_broadcast_group",
mainSessionKey: "agent:main:main",
matchedBy: "default",
});
dispatchReplyFromConfigMock.mockImplementation(async ({ ctx, dispatcher }) => {
if (
typeof ctx?.SessionKey === "string" &&
ctx.SessionKey.includes("agent:main:") &&
typeof dispatcher?.sendFinalReply === "function"
) {
await dispatcher.sendFinalReply({ text: "broadcast reply once" });
}
return {
queuedFinal: false,
counts: {
final:
typeof ctx?.SessionKey === "string" && ctx.SessionKey.includes("agent:main:") ? 1 : 0,
},
};
});
withReplyDispatcherMock.mockImplementation(async ({ run }) => await run());
setFeishuRuntime(
createPluginRuntimeMock({
channel: {
debounce: {
resolveInboundDebounceMs: vi.fn(() => 0),
createInboundDebouncer: <T>(params: {
onFlush?: (items: T[]) => Promise<void>;
onError?: (err: unknown, items: T[]) => void;
}) => ({
enqueue: async (item: T) => {
try {
await params.onFlush?.([item]);
} catch (err) {
params.onError?.(err, [item]);
}
},
flushKey: async () => {},
}),
},
text: {
hasControlCommand: vi.fn(() => false),
},
routing: {
resolveAgentRoute:
resolveAgentRouteMock as unknown as PluginRuntime["channel"]["routing"]["resolveAgentRoute"],
},
reply: {
resolveEnvelopeFormatOptions: vi.fn(() => ({})),
formatAgentEnvelope: vi.fn((params: { body: string }) => params.body),
finalizeInboundContext:
finalizeInboundContextMock as unknown as PluginRuntime["channel"]["reply"]["finalizeInboundContext"],
dispatchReplyFromConfig:
dispatchReplyFromConfigMock as unknown as PluginRuntime["channel"]["reply"]["dispatchReplyFromConfig"],
withReplyDispatcher:
withReplyDispatcherMock as unknown as PluginRuntime["channel"]["reply"]["withReplyDispatcher"],
},
commands: {
shouldComputeCommandAuthorized: vi.fn(() => false),
resolveCommandAuthorizedFromAuthorizers: vi.fn(() => false),
},
session: {
readSessionUpdatedAt: vi.fn(),
resolveStorePath: vi.fn(() => "/tmp/feishu-broadcast-sessions.json"),
},
pairing: {
readAllowFromStore: vi.fn().mockResolvedValue([]),
upsertPairingRequest: vi.fn(),
buildPairingReply: vi.fn(),
},
},
media: {
detectMime: vi.fn(async () => "text/plain"),
},
}) as unknown as PluginRuntime,
);
});
afterEach(() => {
if (originalStateDir === undefined) {
delete process.env.OPENCLAW_STATE_DIR;
return;
}
process.env.OPENCLAW_STATE_DIR = originalStateDir;
});
  // Regression guard: when the same broadcast event reaches two accounts, both
  // agent routes dispatch, but only ONE Feishu reply dispatcher (and therefore
  // one outbound send) is ever created.
  it("uses one active reply path when the same broadcast event reaches two accounts", async () => {
    const onMessageA = await setupLifecycleMonitor("account-A");
    const onMessageB = await setupLifecycleMonitor("account-B");
    const event = createBroadcastEvent("om_broadcast_once");
    // Deliver the identical event to both account monitors, letting queued
    // microtasks/timers settle between deliveries.
    await onMessageA(event);
    await settleAsyncWork();
    await onMessageB(event);
    await settleAsyncWork();
    expect(runtimesByAccount.get("account-A")?.error).not.toHaveBeenCalled();
    expect(runtimesByAccount.get("account-B")?.error).not.toHaveBeenCalled();
    // Two dispatches (one per routed agent), but a single dispatcher creation.
    expect(dispatchReplyFromConfigMock).toHaveBeenCalledTimes(2);
    expect(createFeishuReplyDispatcherMock).toHaveBeenCalledTimes(1);
    expect(createFeishuReplyDispatcherMock).toHaveBeenCalledWith(
      expect.objectContaining({
        accountId: "account-a",
        chatId: "oc_broadcast_group",
        replyToMessageId: "om_broadcast_once",
      }),
    );
    const sessionKeys = finalizeInboundContextMock.mock.calls.map(
      (call) => (call[0] as { SessionKey?: string }).SessionKey,
    );
    // Each agent still finalizes its own session context.
    expect(sessionKeys).toContain("agent:main:feishu:group:oc_broadcast_group");
    expect(sessionKeys).toContain("agent:susan:feishu:group:oc_broadcast_group");
    const activeDispatcher = createFeishuReplyDispatcherMock.mock.results[0]?.value.dispatcher as {
      sendFinalReply: ReturnType<typeof vi.fn>;
    };
    expect(activeDispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
  });
  // Regression guard: if the first account's dispatch fails AFTER the reply was
  // already sent, the second account must not re-deliver the same reply.
  it("does not duplicate delivery after a post-send failure on the first account", async () => {
    const onMessageA = await setupLifecycleMonitor("account-A");
    const onMessageB = await setupLifecycleMonitor("account-B");
    const event = createBroadcastEvent("om_broadcast_retry");
    dispatchReplyFromConfigMock.mockImplementationOnce(async ({ ctx, dispatcher }) => {
      // The susan route reports nothing queued without sending ...
      if (typeof ctx?.SessionKey === "string" && ctx.SessionKey.includes("agent:susan:")) {
        return { queuedFinal: false, counts: { final: 0 } };
      }
      // ... while the other route sends the reply and then throws (simulated
      // post-send crash).
      await dispatcher.sendFinalReply({ text: "broadcast reply once" });
      throw new Error("post-send failure");
    });
    await onMessageA(event);
    await settleAsyncWork();
    await onMessageB(event);
    await settleAsyncWork();
    expect(runtimesByAccount.get("account-A")?.error).not.toHaveBeenCalled();
    expect(runtimesByAccount.get("account-B")?.error).not.toHaveBeenCalled();
    expect(dispatchReplyFromConfigMock).toHaveBeenCalledTimes(2);
    const activeDispatcher = createFeishuReplyDispatcherMock.mock.results[0]?.value.dispatcher as {
      sendFinalReply: ReturnType<typeof vi.fn>;
    };
    // Exactly one outbound send despite the failure + retry.
    expect(activeDispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
  });
});

View File

@ -0,0 +1,394 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createPluginRuntimeMock } from "../../../test/helpers/extensions/plugin-runtime-mock.js";
import type { ClawdbotConfig, PluginRuntime, RuntimeEnv } from "../runtime-api.js";
import { createFeishuCardInteractionEnvelope } from "./card-interaction.js";
import { monitorSingleAccount } from "./monitor.account.js";
import { setFeishuRuntime } from "./runtime.js";
import type { ResolvedFeishuAccount } from "./types.js";
type BoundConversation = {
bindingId: string;
targetSessionKey: string;
};
const createEventDispatcherMock = vi.hoisted(() => vi.fn());
const monitorWebSocketMock = vi.hoisted(() => vi.fn(async () => {}));
const monitorWebhookMock = vi.hoisted(() => vi.fn(async () => {}));
const createFeishuThreadBindingManagerMock = vi.hoisted(() => vi.fn(() => ({ stop: vi.fn() })));
const createFeishuReplyDispatcherMock = vi.hoisted(() => vi.fn());
const resolveBoundConversationMock = vi.hoisted(() =>
vi.fn<() => BoundConversation | null>(() => null),
);
const touchBindingMock = vi.hoisted(() => vi.fn());
const resolveAgentRouteMock = vi.hoisted(() => vi.fn());
const dispatchReplyFromConfigMock = vi.hoisted(() => vi.fn());
const withReplyDispatcherMock = vi.hoisted(() => vi.fn());
const finalizeInboundContextMock = vi.hoisted(() => vi.fn((ctx) => ctx));
const sendMessageFeishuMock = vi.hoisted(() =>
vi.fn(async () => ({ messageId: "om_notice", chatId: "p2p:ou_user1" })),
);
const sendCardFeishuMock = vi.hoisted(() =>
vi.fn(async () => ({ messageId: "om_card", chatId: "p2p:ou_user1" })),
);
const getMessageFeishuMock = vi.hoisted(() => vi.fn(async () => null));
const listFeishuThreadMessagesMock = vi.hoisted(() => vi.fn(async () => []));
let handlers: Record<string, (data: unknown) => Promise<void>> = {};
let lastRuntime: RuntimeEnv | null = null;
const originalStateDir = process.env.OPENCLAW_STATE_DIR;
vi.mock("./client.js", async () => {
const actual = await vi.importActual<typeof import("./client.js")>("./client.js");
return {
...actual,
createEventDispatcher: createEventDispatcherMock,
};
});
vi.mock("./monitor.transport.js", () => ({
monitorWebSocket: monitorWebSocketMock,
monitorWebhook: monitorWebhookMock,
}));
vi.mock("./thread-bindings.js", () => ({
createFeishuThreadBindingManager: createFeishuThreadBindingManagerMock,
}));
vi.mock("./reply-dispatcher.js", () => ({
createFeishuReplyDispatcher: createFeishuReplyDispatcherMock,
}));
vi.mock("./send.js", () => ({
sendMessageFeishu: sendMessageFeishuMock,
sendCardFeishu: sendCardFeishuMock,
getMessageFeishu: getMessageFeishuMock,
listFeishuThreadMessages: listFeishuThreadMessagesMock,
}));
vi.mock("openclaw/plugin-sdk/conversation-runtime", async (importOriginal) => {
const actual = await importOriginal<typeof import("openclaw/plugin-sdk/conversation-runtime")>();
return {
...actual,
getSessionBindingService: () => ({
resolveByConversation: resolveBoundConversationMock,
touch: touchBindingMock,
}),
};
});
vi.mock("../../../src/infra/outbound/session-binding-service.js", () => ({
getSessionBindingService: () => ({
resolveByConversation: resolveBoundConversationMock,
touch: touchBindingMock,
}),
}));
// Builds the minimal ClawdbotConfig the monitor needs: one enabled websocket
// Feishu account with an open DM policy and zero inbound debounce so tests do
// not wait on timers.
function createLifecycleConfig(): ClawdbotConfig {
  return {
    channels: {
      feishu: {
        enabled: true,
        dmPolicy: "open",
        requireMention: false,
        resolveSenderNames: false,
        accounts: {
          "acct-card": {
            enabled: true,
            appId: "cli_test",
            appSecret: "secret_test", // pragma: allowlist secret
            connectionMode: "websocket",
            dmPolicy: "open",
            requireMention: false,
            resolveSenderNames: false,
          },
        },
      },
    },
    messages: {
      inbound: {
        // Zero debounce for both global and per-channel inbound handling.
        debounceMs: 0,
        byChannel: {
          feishu: 0,
        },
      },
    },
  } as ClawdbotConfig;
}
// Resolved-account fixture matching the "acct-card" entry from
// createLifecycleConfig; cast through unknown because only the fields the
// monitor reads are populated.
function createLifecycleAccount(): ResolvedFeishuAccount {
  return {
    accountId: "acct-card",
    selectionSource: "explicit",
    enabled: true,
    configured: true,
    appId: "cli_test",
    appSecret: "secret_test", // pragma: allowlist secret
    domain: "feishu",
    config: {
      enabled: true,
      connectionMode: "websocket",
      dmPolicy: "open",
      requireMention: false,
      resolveSenderNames: false,
    },
  } as unknown as ResolvedFeishuAccount;
}
// Fresh RuntimeEnv whose log/error/exit are spies the assertions can inspect.
function createRuntimeEnv(): RuntimeEnv {
  const env = {
    log: vi.fn(),
    error: vi.fn(),
    exit: vi.fn(),
  };
  return env as RuntimeEnv;
}
// Fabricates a Feishu card.action.trigger payload carrying a quick-action
// envelope; defaults to a p2p chat with a fixed operator identity.
function createCardActionEvent(params: {
  token: string;
  action: string;
  command: string;
  chatId?: string;
  chatType?: "group" | "p2p";
}) {
  const openId = "ou_user1";
  const chatId = params.chatId ?? "p2p:ou_user1";
  const chatType = params.chatType ?? "p2p";
  return {
    operator: {
      open_id: openId,
      user_id: "user_1",
      union_id: "union_1",
    },
    token: params.token,
    action: {
      tag: "button",
      value: createFeishuCardInteractionEnvelope({
        // Compact envelope keys mapped from the params above
        // (k=kind, a=action, q=command, c=context with u=user, h=chat,
        // t=chat type, e=expiry one minute out).
        k: "quick",
        a: params.action,
        q: params.command,
        c: {
          u: openId,
          h: chatId,
          t: chatType,
          e: Date.now() + 60_000,
        },
      }),
    },
    context: {
      open_id: openId,
      user_id: "user_1",
      chat_id: chatId,
    },
  };
}
// Drains pending microtasks and zero-delay timers so queued async work in the
// handler pipeline completes before the test asserts.
async function settleAsyncWork(): Promise<void> {
  const drainOnce = async (): Promise<void> => {
    await Promise.resolve();
    await new Promise<void>((resolve) => setTimeout(resolve, 0));
  };
  for (let round = 0; round < 6; round += 1) {
    await drainOnce();
  }
}
// Boots monitorSingleAccount against the mocked event dispatcher and returns
// the registered "card.action.trigger" handler so tests can inject events
// directly without a transport.
async function setupLifecycleMonitor() {
  const register = vi.fn((registered: Record<string, (data: unknown) => Promise<void>>) => {
    handlers = registered;
  });
  createEventDispatcherMock.mockReturnValue({ register });
  lastRuntime = createRuntimeEnv();
  await monitorSingleAccount({
    cfg: createLifecycleConfig(),
    account: createLifecycleAccount(),
    runtime: lastRuntime,
    // Prefetched bot identity avoids any lookup during startup.
    botOpenIdSource: {
      kind: "prefetched",
      botOpenId: "ou_bot_1",
      botName: "Bot",
    },
  });
  const onCardAction = handlers["card.action.trigger"];
  if (!onCardAction) {
    throw new Error("missing card.action.trigger handler");
  }
  return onCardAction;
}
describe("Feishu card-action lifecycle", () => {
beforeEach(() => {
vi.clearAllMocks();
handlers = {};
lastRuntime = null;
process.env.OPENCLAW_STATE_DIR = `/tmp/openclaw-feishu-card-action-${Date.now()}-${Math.random().toString(36).slice(2)}`;
const dispatcher = {
sendToolResult: vi.fn(() => false),
sendBlockReply: vi.fn(() => false),
sendFinalReply: vi.fn(async () => true),
waitForIdle: vi.fn(async () => {}),
getQueuedCounts: vi.fn(() => ({ tool: 0, block: 0, final: 0 })),
markComplete: vi.fn(),
};
createFeishuReplyDispatcherMock.mockReturnValue({
dispatcher,
replyOptions: {},
markDispatchIdle: vi.fn(),
});
resolveBoundConversationMock.mockImplementation(() => ({
bindingId: "binding-card",
targetSessionKey: "agent:bound-agent:feishu:direct:ou_user1",
}));
resolveAgentRouteMock.mockReturnValue({
agentId: "main",
channel: "feishu",
accountId: "acct-card",
sessionKey: "agent:main:feishu:direct:ou_user1",
mainSessionKey: "agent:main:main",
matchedBy: "default",
});
dispatchReplyFromConfigMock.mockImplementation(async ({ dispatcher }) => {
await dispatcher.sendFinalReply({ text: "card action reply once" });
return {
queuedFinal: false,
counts: { final: 1 },
};
});
withReplyDispatcherMock.mockImplementation(async ({ run }) => await run());
setFeishuRuntime(
createPluginRuntimeMock({
channel: {
debounce: {
resolveInboundDebounceMs: vi.fn(() => 0),
createInboundDebouncer: <T>(params: {
onFlush?: (items: T[]) => Promise<void>;
onError?: (err: unknown, items: T[]) => void;
}) => ({
enqueue: async (item: T) => {
try {
await params.onFlush?.([item]);
} catch (err) {
params.onError?.(err, [item]);
}
},
flushKey: async () => {},
}),
},
text: {
hasControlCommand: vi.fn(() => false),
},
routing: {
resolveAgentRoute:
resolveAgentRouteMock as unknown as PluginRuntime["channel"]["routing"]["resolveAgentRoute"],
},
reply: {
resolveEnvelopeFormatOptions: vi.fn(() => ({})),
formatAgentEnvelope: vi.fn((params: { body: string }) => params.body),
finalizeInboundContext:
finalizeInboundContextMock as unknown as PluginRuntime["channel"]["reply"]["finalizeInboundContext"],
dispatchReplyFromConfig:
dispatchReplyFromConfigMock as unknown as PluginRuntime["channel"]["reply"]["dispatchReplyFromConfig"],
withReplyDispatcher:
withReplyDispatcherMock as unknown as PluginRuntime["channel"]["reply"]["withReplyDispatcher"],
},
commands: {
shouldComputeCommandAuthorized: vi.fn(() => false),
resolveCommandAuthorizedFromAuthorizers: vi.fn(() => false),
},
session: {
readSessionUpdatedAt: vi.fn(),
resolveStorePath: vi.fn(() => "/tmp/feishu-card-action-sessions.json"),
},
pairing: {
readAllowFromStore: vi.fn().mockResolvedValue([]),
upsertPairingRequest: vi.fn(),
buildPairingReply: vi.fn(),
},
},
media: {
detectMime: vi.fn(async () => "text/plain"),
},
}) as unknown as PluginRuntime,
);
});
afterEach(() => {
if (originalStateDir === undefined) {
delete process.env.OPENCLAW_STATE_DIR;
return;
}
process.env.OPENCLAW_STATE_DIR = originalStateDir;
});
  // Duplicate callback delivery (e.g. platform retry) must produce exactly one
  // dispatched reply and reuse the bound session.
  it("routes one reply across duplicate callback delivery", async () => {
    const onCardAction = await setupLifecycleMonitor();
    const event = createCardActionEvent({
      token: "tok-card-once",
      action: "feishu.quick_actions.help",
      command: "/help",
    });
    // Same event delivered twice, settling queued async work in between.
    await onCardAction(event);
    await settleAsyncWork();
    await onCardAction(event);
    await settleAsyncWork();
    expect(lastRuntime?.error).not.toHaveBeenCalled();
    expect(dispatchReplyFromConfigMock).toHaveBeenCalledTimes(1);
    expect(createFeishuReplyDispatcherMock).toHaveBeenCalledTimes(1);
    expect(createFeishuReplyDispatcherMock).toHaveBeenCalledWith(
      expect.objectContaining({
        accountId: "acct-card",
        chatId: "p2p:ou_user1",
        replyToMessageId: "card-action-tok-card-once",
      }),
    );
    // The bound session key (from resolveBoundConversationMock) wins routing.
    expect(finalizeInboundContextMock).toHaveBeenCalledWith(
      expect.objectContaining({
        AccountId: "acct-card",
        SessionKey: "agent:bound-agent:feishu:direct:ou_user1",
        MessageSid: "card-action-tok-card-once",
      }),
    );
    expect(touchBindingMock).toHaveBeenCalledWith("binding-card");
    const dispatcher = createFeishuReplyDispatcherMock.mock.results[0]?.value.dispatcher as {
      sendFinalReply: ReturnType<typeof vi.fn>;
    };
    expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
    // Replies flow through the dispatcher, never via the direct send APIs.
    expect(sendMessageFeishuMock).not.toHaveBeenCalled();
    expect(sendCardFeishuMock).not.toHaveBeenCalled();
  });
  // A failure AFTER the reply has been sent must not cause the retried
  // callback to send the reply a second time.
  it("does not duplicate delivery when retrying after a post-send failure", async () => {
    const onCardAction = await setupLifecycleMonitor();
    const event = createCardActionEvent({
      token: "tok-card-retry",
      action: "feishu.quick_actions.help",
      command: "/help",
    });
    // First dispatch sends the reply, then throws (simulated post-send crash).
    dispatchReplyFromConfigMock.mockImplementationOnce(async ({ dispatcher }) => {
      await dispatcher.sendFinalReply({ text: "card action reply once" });
      throw new Error("post-send failure");
    });
    await onCardAction(event);
    await settleAsyncWork();
    await onCardAction(event);
    await settleAsyncWork();
    // The failure surfaces once; the duplicate delivery is suppressed entirely.
    expect(lastRuntime?.error).toHaveBeenCalledTimes(1);
    expect(dispatchReplyFromConfigMock).toHaveBeenCalledTimes(1);
    const dispatcher = createFeishuReplyDispatcherMock.mock.results[0]?.value.dispatcher as {
      sendFinalReply: ReturnType<typeof vi.fn>;
    };
    expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
  });
});

View File

@ -0,0 +1,372 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createPluginRuntimeMock } from "../../../test/helpers/extensions/plugin-runtime-mock.js";
import type { ClawdbotConfig, PluginRuntime, RuntimeEnv } from "../runtime-api.js";
import { monitorSingleAccount } from "./monitor.account.js";
import { setFeishuRuntime } from "./runtime.js";
import type { ResolvedFeishuAccount } from "./types.js";
const createEventDispatcherMock = vi.hoisted(() => vi.fn());
const monitorWebSocketMock = vi.hoisted(() => vi.fn(async () => {}));
const monitorWebhookMock = vi.hoisted(() => vi.fn(async () => {}));
const createFeishuThreadBindingManagerMock = vi.hoisted(() => vi.fn(() => ({ stop: vi.fn() })));
const createFeishuReplyDispatcherMock = vi.hoisted(() => vi.fn());
const resolveBoundConversationMock = vi.hoisted(() =>
vi.fn<
() => {
bindingId: string;
targetSessionKey: string;
} | null
>(() => null),
);
const touchBindingMock = vi.hoisted(() => vi.fn());
const resolveAgentRouteMock = vi.hoisted(() => vi.fn());
const dispatchReplyFromConfigMock = vi.hoisted(() => vi.fn());
const withReplyDispatcherMock = vi.hoisted(() => vi.fn());
const finalizeInboundContextMock = vi.hoisted(() => vi.fn((ctx) => ctx));
const getMessageFeishuMock = vi.hoisted(() => vi.fn(async () => null));
const listFeishuThreadMessagesMock = vi.hoisted(() => vi.fn(async () => []));
const sendMessageFeishuMock = vi.hoisted(() =>
vi.fn(async () => ({ messageId: "om_sent", chatId: "oc_group_1" })),
);
let handlers: Record<string, (data: unknown) => Promise<void>> = {};
let lastRuntime: RuntimeEnv | null = null;
const originalStateDir = process.env.OPENCLAW_STATE_DIR;
vi.mock("./client.js", async () => {
const actual = await vi.importActual<typeof import("./client.js")>("./client.js");
return {
...actual,
createEventDispatcher: createEventDispatcherMock,
};
});
vi.mock("./monitor.transport.js", () => ({
monitorWebSocket: monitorWebSocketMock,
monitorWebhook: monitorWebhookMock,
}));
vi.mock("./thread-bindings.js", () => ({
createFeishuThreadBindingManager: createFeishuThreadBindingManagerMock,
}));
vi.mock("./reply-dispatcher.js", () => ({
createFeishuReplyDispatcher: createFeishuReplyDispatcherMock,
}));
vi.mock("./send.js", () => ({
getMessageFeishu: getMessageFeishuMock,
listFeishuThreadMessages: listFeishuThreadMessagesMock,
sendMessageFeishu: sendMessageFeishuMock,
}));
vi.mock("openclaw/plugin-sdk/conversation-runtime", async (importOriginal) => {
const actual = await importOriginal<typeof import("openclaw/plugin-sdk/conversation-runtime")>();
return {
...actual,
getSessionBindingService: () => ({
resolveByConversation: resolveBoundConversationMock,
touch: touchBindingMock,
}),
};
});
vi.mock("../../../src/infra/outbound/session-binding-service.js", () => ({
getSessionBindingService: () => ({
resolveByConversation: resolveBoundConversationMock,
touch: touchBindingMock,
}),
}));
function createLifecycleConfig(): ClawdbotConfig {
return {
messages: {
inbound: {
debounceMs: 0,
byChannel: {
feishu: 0,
},
},
},
channels: {
feishu: {
enabled: true,
accounts: {
"acct-lifecycle": {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "websocket",
groupPolicy: "open",
requireMention: false,
resolveSenderNames: false,
groups: {
oc_group_1: {
requireMention: false,
groupSessionScope: "group_topic_sender",
replyInThread: "enabled",
},
},
},
},
},
},
} as ClawdbotConfig;
}
function createLifecycleAccount(): ResolvedFeishuAccount {
return {
accountId: "acct-lifecycle",
selectionSource: "explicit",
enabled: true,
configured: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
domain: "feishu",
config: {
enabled: true,
connectionMode: "websocket",
groupPolicy: "open",
requireMention: false,
resolveSenderNames: false,
groups: {
oc_group_1: {
requireMention: false,
groupSessionScope: "group_topic_sender",
replyInThread: "enabled",
},
},
},
} as unknown as ResolvedFeishuAccount;
}
// Fresh RuntimeEnv whose log/error/exit are spies the assertions can inspect.
function createRuntimeEnv(): RuntimeEnv {
  const env = {
    log: vi.fn(),
    error: vi.fn(),
    exit: vi.fn(),
  };
  return env as RuntimeEnv;
}
// Fabricates an im.message.receive_v1 text event posted inside a group topic
// (thread), so routing sees both root_id and thread_id alongside the chat.
function createTextEvent(messageId: string) {
  return {
    sender: {
      sender_id: { open_id: "ou_sender_1" },
      sender_type: "user",
    },
    message: {
      message_id: messageId,
      root_id: "om_root_topic_1",
      thread_id: "omt_topic_1",
      chat_id: "oc_group_1",
      chat_type: "group" as const,
      message_type: "text",
      // Feishu delivers text content as a JSON string.
      content: JSON.stringify({ text: "hello from topic" }),
      create_time: "1710000000000",
    },
  };
}
// Drains pending microtasks and zero-delay timers so queued async work in the
// handler pipeline completes before the test asserts.
async function settleAsyncWork(): Promise<void> {
  const drainOnce = async (): Promise<void> => {
    await Promise.resolve();
    await new Promise<void>((resolve) => setTimeout(resolve, 0));
  };
  for (let round = 0; round < 6; round += 1) {
    await drainOnce();
  }
}
// Boots monitorSingleAccount with mocked transports and returns the registered
// "im.message.receive_v1" handler so tests can feed inbound events directly.
async function setupLifecycleMonitor() {
  const register = vi.fn((registered: Record<string, (data: unknown) => Promise<void>>) => {
    handlers = registered;
  });
  createEventDispatcherMock.mockReturnValue({ register });
  lastRuntime = createRuntimeEnv();
  await monitorSingleAccount({
    cfg: createLifecycleConfig(),
    account: createLifecycleAccount(),
    runtime: lastRuntime,
    // Prefetched bot identity avoids any lookup during startup.
    botOpenIdSource: {
      kind: "prefetched",
      botOpenId: "ou_bot_1",
      botName: "Bot",
    },
  });
  const onMessage = handlers["im.message.receive_v1"];
  if (!onMessage) {
    throw new Error("missing im.message.receive_v1 handler");
  }
  return onMessage;
}
describe("Feishu reply-once lifecycle", () => {
beforeEach(() => {
vi.clearAllMocks();
handlers = {};
lastRuntime = null;
process.env.OPENCLAW_STATE_DIR = `/tmp/openclaw-feishu-lifecycle-${Date.now()}-${Math.random().toString(36).slice(2)}`;
const dispatcher = {
sendToolResult: vi.fn(() => false),
sendBlockReply: vi.fn(() => false),
sendFinalReply: vi.fn(async () => true),
waitForIdle: vi.fn(async () => {}),
getQueuedCounts: vi.fn(() => ({ tool: 0, block: 0, final: 0 })),
markComplete: vi.fn(),
};
createFeishuReplyDispatcherMock.mockReturnValue({
dispatcher,
replyOptions: {},
markDispatchIdle: vi.fn(),
});
resolveBoundConversationMock.mockReturnValue({
bindingId: "binding-1",
targetSessionKey: "agent:bound-agent:feishu:topic:om_root_topic_1:ou_sender_1",
});
resolveAgentRouteMock.mockReturnValue({
agentId: "main",
channel: "feishu",
accountId: "acct-lifecycle",
sessionKey: "agent:main:feishu:group:oc_group_1",
mainSessionKey: "agent:main:main",
matchedBy: "default",
});
dispatchReplyFromConfigMock.mockImplementation(async ({ dispatcher }) => {
await dispatcher.sendFinalReply({ text: "reply once" });
return {
queuedFinal: false,
counts: { final: 1 },
};
});
withReplyDispatcherMock.mockImplementation(async ({ run }) => await run());
setFeishuRuntime(
createPluginRuntimeMock({
channel: {
debounce: {
resolveInboundDebounceMs: vi.fn(() => 0),
createInboundDebouncer: <T>(params: {
onFlush?: (items: T[]) => Promise<void>;
onError?: (err: unknown, items: T[]) => void;
}) => ({
enqueue: async (item: T) => {
try {
await params.onFlush?.([item]);
} catch (err) {
params.onError?.(err, [item]);
}
},
flushKey: async () => {},
}),
},
text: {
hasControlCommand: vi.fn(() => false),
},
routing: {
resolveAgentRoute:
resolveAgentRouteMock as unknown as PluginRuntime["channel"]["routing"]["resolveAgentRoute"],
},
reply: {
resolveEnvelopeFormatOptions: vi.fn(() => ({})),
formatAgentEnvelope: vi.fn((params: { body: string }) => params.body),
finalizeInboundContext:
finalizeInboundContextMock as unknown as PluginRuntime["channel"]["reply"]["finalizeInboundContext"],
dispatchReplyFromConfig:
dispatchReplyFromConfigMock as unknown as PluginRuntime["channel"]["reply"]["dispatchReplyFromConfig"],
withReplyDispatcher:
withReplyDispatcherMock as unknown as PluginRuntime["channel"]["reply"]["withReplyDispatcher"],
},
commands: {
shouldComputeCommandAuthorized: vi.fn(() => false),
resolveCommandAuthorizedFromAuthorizers: vi.fn(() => false),
},
session: {
readSessionUpdatedAt: vi.fn(),
resolveStorePath: vi.fn(() => "/tmp/feishu-lifecycle-sessions.json"),
},
pairing: {
readAllowFromStore: vi.fn().mockResolvedValue([]),
upsertPairingRequest: vi.fn(),
buildPairingReply: vi.fn(),
},
},
media: {
detectMime: vi.fn(async () => "text/plain"),
},
}) as unknown as PluginRuntime,
);
});
afterEach(() => {
if (originalStateDir === undefined) {
delete process.env.OPENCLAW_STATE_DIR;
return;
}
process.env.OPENCLAW_STATE_DIR = originalStateDir;
});
it("routes a topic-bound inbound event and emits one reply across duplicate replay", async () => {
const onMessage = await setupLifecycleMonitor();
const event = createTextEvent("om_lifecycle_once");
await onMessage(event);
await settleAsyncWork();
await onMessage(event);
await settleAsyncWork();
expect(lastRuntime?.error).not.toHaveBeenCalled();
expect(dispatchReplyFromConfigMock).toHaveBeenCalledTimes(1);
expect(createFeishuReplyDispatcherMock).toHaveBeenCalledTimes(1);
expect(createFeishuReplyDispatcherMock).toHaveBeenCalledWith(
expect.objectContaining({
accountId: "acct-lifecycle",
chatId: "oc_group_1",
replyToMessageId: "om_root_topic_1",
replyInThread: true,
rootId: "om_root_topic_1",
}),
);
expect(finalizeInboundContextMock).toHaveBeenCalledWith(
expect.objectContaining({
AccountId: "acct-lifecycle",
SessionKey: "agent:bound-agent:feishu:topic:om_root_topic_1:ou_sender_1",
MessageSid: "om_lifecycle_once",
MessageThreadId: "om_root_topic_1",
}),
);
expect(touchBindingMock).toHaveBeenCalledWith("binding-1");
const dispatcher = createFeishuReplyDispatcherMock.mock.results[0]?.value.dispatcher as {
sendFinalReply: ReturnType<typeof vi.fn>;
};
expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
});
it("does not duplicate delivery when the first attempt fails after sending the reply", async () => {
const onMessage = await setupLifecycleMonitor();
const event = createTextEvent("om_lifecycle_retry");
dispatchReplyFromConfigMock.mockImplementationOnce(async ({ dispatcher }) => {
await dispatcher.sendFinalReply({ text: "reply once" });
throw new Error("post-send failure");
});
await onMessage(event);
await settleAsyncWork();
await onMessage(event);
await settleAsyncWork();
expect(lastRuntime?.error).toHaveBeenCalledTimes(1);
expect(dispatchReplyFromConfigMock).toHaveBeenCalledTimes(1);
const dispatcher = createFeishuReplyDispatcherMock.mock.results[0]?.value.dispatcher as {
sendFinalReply: ReturnType<typeof vi.fn>;
};
expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
});
});

View File

@ -1,4 +1,5 @@
// Keep the external runtime API light so Jiti callers can resolve Matrix config
// helpers without traversing the full plugin-sdk/runtime graph.
// helpers without traversing the full plugin-sdk/runtime graph or bootstrapping
// matrix-js-sdk during plain runtime-api import.
export * from "./src/auth-precedence.js";
export * from "./helper-api.js";

View File

@ -434,7 +434,7 @@ export const matrixPlugin: ChannelPlugin<ResolvedMatrixAccount> = {
//
// INVARIANT: The import() below cannot hang because:
// 1. It only loads local ESM modules with no circular awaits
// 2. Module initialization is synchronous (no top-level await in ./matrix/index.js)
// 2. Module initialization is synchronous (no top-level await in ./matrix/monitor/index.js)
// 3. The lock only serializes the import phase, not the provider startup
const previousLock = matrixStartupLock;
let releaseLock: () => void = () => {};
@ -445,9 +445,9 @@ export const matrixPlugin: ChannelPlugin<ResolvedMatrixAccount> = {
// Lazy import: the monitor pulls the reply pipeline; avoid ESM init cycles.
// Wrap in try/finally to ensure lock is released even if import fails.
let monitorMatrixProvider: typeof import("./matrix/index.js").monitorMatrixProvider;
let monitorMatrixProvider: typeof import("./matrix/monitor/index.js").monitorMatrixProvider;
try {
const module = await import("./matrix/index.js");
const module = await import("./matrix/monitor/index.js");
monitorMatrixProvider = module.monitorMatrixProvider;
} finally {
// Release lock after import completes or fails

View File

@ -100,10 +100,10 @@ function createHandlerHarness() {
mediaMaxBytes: 5 * 1024 * 1024,
startupMs: Date.now() - 120_000,
startupGraceMs: 60_000,
dropPreStartupMessages: false,
directTracker: {
isDirectMessage: vi.fn().mockResolvedValue(true),
},
dropPreStartupMessages: true,
getRoomInfo: vi.fn().mockResolvedValue({
name: "Media Room",
canonicalAlias: "#media:example.org",

View File

@ -590,10 +590,10 @@ describe("matrix monitor handler pairing account scope", () => {
mediaMaxBytes: 10_000_000,
startupMs: 0,
startupGraceMs: 0,
dropPreStartupMessages: false,
directTracker: {
isDirectMessage: async () => false,
},
dropPreStartupMessages: true,
getRoomInfo: async () => ({ altAliases: [] }),
getMemberDisplayName: async () => "sender",
needsRoomAliasesForConfig: false,

View File

@ -115,10 +115,10 @@ describe("createMatrixRoomMessageHandler thread root media", () => {
mediaMaxBytes: 5 * 1024 * 1024,
startupMs: Date.now() - 120_000,
startupGraceMs: 60_000,
dropPreStartupMessages: false,
directTracker: {
isDirectMessage: vi.fn().mockResolvedValue(true),
},
dropPreStartupMessages: true,
getRoomInfo: vi.fn().mockResolvedValue({
name: "Media Room",
canonicalAlias: "#media:example.org",

View File

@ -1,2 +1,4 @@
export * from "openclaw/plugin-sdk/matrix";
export * from "../runtime-api.js";
// Keep auth-precedence available internally without re-exporting helper-api
// twice through both plugin-sdk/matrix and ../runtime-api.js.
export * from "./auth-precedence.js";

View File

@ -1382,14 +1382,14 @@ describe("createTelegramBot", () => {
expect(replySpy).not.toHaveBeenCalled();
});
it.skip("routes plugin-owned callback namespaces before synthetic command fallback", async () => {
it("routes plugin-owned callback namespaces before synthetic command fallback", async () => {
onSpy.mockClear();
replySpy.mockClear();
editMessageTextSpy.mockClear();
sendMessageSpy.mockClear();
registerPluginInteractiveHandler("codex-plugin", {
channel: "telegram",
namespace: "codex",
namespace: "codexapp",
handler: async ({ respond, callback }: PluginInteractiveTelegramHandlerContext) => {
await respond.editMessage({
text: `Handled ${callback.payload}`,
@ -1416,7 +1416,7 @@ describe("createTelegramBot", () => {
await callbackHandler({
callbackQuery: {
id: "cbq-codex-1",
data: "codex:resume:thread-1",
data: "codexapp:resume:thread-1",
from: { id: 9, first_name: "Ada", username: "ada_bot" },
message: {
chat: { id: 1234, type: "private" },

View File

@ -4,7 +4,8 @@
"description": "OpenClaw Tlon/Urbit channel plugin",
"type": "module",
"dependencies": {
"@tloncorp/api": "git+https://github.com/tloncorp/api-beta.git#7eede1c1a756977b09f96aa14a92e2b06318ae87",
"@aws-sdk/client-s3": "3.1000.0",
"@aws-sdk/s3-request-presigner": "3.1000.0",
"@tloncorp/tlon-skill": "0.2.2",
"@urbit/aura": "^3.0.0",
"zod": "^4.3.6"

View File

@ -1,5 +1,4 @@
import crypto from "node:crypto";
import { configureClient } from "@tloncorp/api";
import type {
ChannelAccountSnapshot,
ChannelOutboundAdapter,
@ -15,6 +14,7 @@ import {
parseTlonTarget,
resolveTlonOutboundTarget,
} from "./targets.js";
import { configureClient } from "./tlon-api.js";
import { resolveTlonAccount } from "./types.js";
import { authenticate } from "./urbit/auth.js";
import { ssrfPolicyFromAllowPrivateNetwork } from "./urbit/context.js";
@ -169,6 +169,7 @@ export const tlonRuntimeOutbound: ChannelOutboundAdapter = {
shipName: account.ship.replace(/^~/, ""),
verbose: false,
getCode: async () => account.code,
allowPrivateNetwork: account.allowPrivateNetwork ?? undefined,
});
const uploadedUrl = mediaUrl ? await uploadImageFromUrl(mediaUrl) : undefined;

View File

@ -0,0 +1,301 @@
import crypto from "node:crypto";
import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
import { authenticate } from "./urbit/auth.js";
import { scryUrbitPath } from "./urbit/channel-ops.js";
import { ssrfPolicyFromAllowPrivateNetwork } from "./urbit/context.js";
type ClientConfig = {
shipUrl: string;
shipName: string;
verbose: boolean;
getCode: () => Promise<string>;
allowPrivateNetwork?: boolean;
};
type StorageService = "presigned-url" | "credentials";
type StorageConfiguration = {
buckets: string[];
currentBucket: string;
region: string;
publicUrlBase: string;
presignedUrl: string;
service: StorageService;
};
type StorageCredentials = {
endpoint: string;
accessKeyId: string;
secretAccessKey: string;
};
type UploadFileParams = {
blob: Blob;
fileName?: string;
contentType?: string;
};
type UploadResult = {
url: string;
};
const MEMEX_BASE_URL = "https://memex.tlon.network";
const mimeToExt: Record<string, string> = {
"image/gif": ".gif",
"image/heic": ".heic",
"image/heif": ".heif",
"image/jpeg": ".jpg",
"image/jpg": ".jpg",
"image/png": ".png",
"image/webp": ".webp",
};
let currentClientConfig: ClientConfig | null = null;
// Stores the active Tlon client configuration for later storage/upload calls.
// The ship name is normalized by stripping a leading sig ("~").
export function configureClient(params: ClientConfig): void {
  const normalizedShipName = params.shipName.replace(/^~/, "");
  currentClientConfig = { ...params, shipName: normalizedShipName };
}
// Returns the active client config; throws if configureClient was never called.
function requireClientConfig(): ClientConfig {
  const config = currentClientConfig;
  if (config === null) {
    throw new Error("Tlon client not configured");
  }
  return config;
}
// Maps an image MIME type to a file extension, defaulting to ".jpg" when the
// type is missing or not in the known table.
function getExtensionFromMimeType(mimeType?: string): string {
  const fallback = ".jpg";
  if (!mimeType) {
    return fallback;
  }
  const mapped = mimeToExt[mimeType.toLowerCase()];
  return mapped || fallback;
}
// Type guard: true only when the payload carries a complete set of custom S3
// credentials (all three fields present and non-empty).
function hasCustomS3Creds(
  credentials: StorageCredentials | null,
): credentials is StorageCredentials {
  if (!credentials) {
    return false;
  }
  return Boolean(credentials.accessKeyId && credentials.endpoint && credentials.secretAccessKey);
}
// Structural check for a raw scry payload that looks like StorageCredentials.
function isStorageCredentials(value: unknown): value is StorageCredentials {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  const candidate = value as Record<string, unknown>;
  const requiredFields = ["endpoint", "accessKeyId", "secretAccessKey"] as const;
  return requiredFields.every((field) => typeof candidate[field] === "string");
}
// Detects whether a ship URL points at Tlon-hosted infrastructure.
//
// Fix: the previous check used an unanchored suffix test
// (`hostname.endsWith("tlon.network")`), which incorrectly matched lookalike
// hosts such as "eviltlon.network". Hosted detection now requires either an
// exact hostname match or a dot-boundary subdomain.
//
// Falls back to string matching on the raw input when it is not a parseable
// URL (preserving the original's lenient fallback path).
function isHostedShipUrl(shipUrl: string): boolean {
  const isHostedHost = (host: string): boolean =>
    host === "tlon.network" ||
    host.endsWith(".tlon.network") ||
    host.endsWith(".test.tlon.systems");
  try {
    return isHostedHost(new URL(shipUrl).hostname);
  } catch {
    // Not a valid URL; apply the same dot-anchored rules to the raw string.
    return isHostedHost(shipUrl);
  }
}
// Ensures an S3 endpoint has a scheme, defaulting to https.
//
// Fix: the previous regex (`/https?:\/\//`) was unanchored, so any string
// merely CONTAINING "http://" anywhere (e.g. a path segment) was treated as
// already having a scheme. The test is now anchored to the start of the
// string.
function prefixEndpoint(endpoint: string): string {
  return /^https?:\/\//.test(endpoint) ? endpoint : `https://${endpoint}`;
}
// Strips any directory components (either separator style) from a file name;
// returns the input unchanged when the last segment is empty.
function sanitizeFileName(fileName: string): string {
  const segments = fileName.split(/[/\\]/);
  const lastSegment = segments[segments.length - 1];
  return lastSegment || fileName;
}
// Logs into the ship using the +code fetched from the account and returns the
// resulting auth cookie.
async function getAuthCookie(config: ClientConfig): Promise<string> {
  const code = await config.getCode();
  const ssrfPolicy = ssrfPolicyFromAllowPrivateNetwork(config.allowPrivateNetwork);
  return await authenticate(config.shipUrl, code, { ssrfPolicy });
}
// Performs an authenticated scry against the ship and returns the JSON payload
// cast to T; callers are responsible for validating the actual shape.
async function scryJson<T>(config: ClientConfig, cookie: string, path: string): Promise<T> {
  const context = {
    baseUrl: config.shipUrl,
    cookie,
    ssrfPolicy: ssrfPolicyFromAllowPrivateNetwork(config.allowPrivateNetwork),
  };
  const payload = await scryUrbitPath(context, { path, auditContext: "tlon-storage-scry" });
  return payload as T;
}
// Fetches the ship's storage configuration, accepting both the wrapped
// ("storage-update") and bare response shapes.
async function getStorageConfiguration(
  config: ClientConfig,
  cookie: string,
): Promise<StorageConfiguration> {
  const result = await scryJson<
    { "storage-update"?: { configuration?: StorageConfiguration } } | StorageConfiguration
  >(config, cookie, "/storage/configuration.json");
  const wrapped = "storage-update" in result ? result["storage-update"]?.configuration : undefined;
  if (wrapped) {
    return wrapped;
  }
  if ("currentBucket" in result) {
    return result;
  }
  throw new Error("Invalid storage configuration response");
}
/**
 * Fetches the ship's storage credentials, tolerating both the enveloped
 * `storage-update` shape and a bare credentials object.
 *
 * @returns the credentials, or null when none are configured.
 */
async function getStorageCredentials(
  config: ClientConfig,
  cookie: string,
): Promise<StorageCredentials | null> {
  const result = await scryJson<
    { "storage-update"?: { credentials?: StorageCredentials } } | StorageCredentials
  >(config, cookie, "/storage/credentials.json");
  if ("storage-update" in result) {
    return result["storage-update"]?.credentials ?? null;
  }
  return isStorageCredentials(result) ? result : null;
}
/**
 * Requests a presigned upload URL from the Memex hosting service.
 *
 * Ship ownership is proven by scrying the "genuine" secret from the ship
 * and forwarding it to Memex together with the upload metadata.
 *
 * @returns `uploadUrl` to PUT the blob to, and `hostedUrl` (the served
 *   file path returned by Memex) for the final public link.
 * @throws when the secret is missing, the Memex request fails, or the
 *   response body is malformed.
 */
async function getMemexUploadUrl(params: {
  config: ClientConfig;
  cookie: string;
  contentLength: number;
  contentType: string;
  fileName: string;
}): Promise<{ hostedUrl: string; uploadUrl: string }> {
  // The secret may arrive either as a bare string or wrapped in an object.
  const token = await scryJson<string | { secret?: string }>(
    params.config,
    params.cookie,
    "/genuine/secret.json",
  );
  const resolvedToken = typeof token === "string" ? token : token.secret;
  if (!resolvedToken) {
    throw new Error("Missing genuine secret");
  }
  const endpoint = `${MEMEX_BASE_URL}/v1/${params.config.shipName}/upload`;
  const response = await fetch(endpoint, {
    method: "PUT",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      token: resolvedToken,
      contentLength: params.contentLength,
      contentType: params.contentType,
      fileName: params.fileName,
    }),
  });
  if (!response.ok) {
    throw new Error(`Memex upload request failed: ${response.status}`);
  }
  // Both fields are required: `url` is the presigned PUT target, `filePath`
  // is where the uploaded file will be publicly served.
  const data = (await response.json()) as { url?: string; filePath?: string } | null;
  if (!data?.url || !data.filePath) {
    throw new Error("Invalid response from Memex");
  }
  return { hostedUrl: data.filePath, uploadUrl: data.url };
}
/**
 * Uploads a blob to Tlon storage and returns its public URL.
 *
 * Two upload paths exist:
 *  - Memex (hosted ships): used when the ship URL is Tlon-hosted and the
 *    storage service is "presigned-url" or no complete custom S3
 *    credentials are configured.
 *  - Direct S3: a presigned PUT built from the ship's own credentials.
 *
 * @throws when no usable credentials exist or either HTTP upload fails.
 */
export async function uploadFile(params: UploadFileParams): Promise<UploadResult> {
  const config = requireClientConfig();
  const cookie = await getAuthCookie(config);
  // Configuration and credentials are independent scries; fetch them in parallel.
  const [storageConfig, credentials] = await Promise.all([
    getStorageConfiguration(config, cookie),
    getStorageCredentials(config, cookie),
  ]);
  const contentType = params.contentType || params.blob.type || "application/octet-stream";
  const extension = getExtensionFromMimeType(contentType);
  const fileName = sanitizeFileName(params.fileName || `upload${extension}`);
  // Object key: namespaced by ship, made unique with timestamp + UUID.
  const fileKey = `${config.shipName}/${Date.now()}-${crypto.randomUUID()}-${fileName}`;
  const useMemex =
    isHostedShipUrl(config.shipUrl) &&
    (storageConfig.service === "presigned-url" || !hasCustomS3Creds(credentials));
  if (useMemex) {
    const { hostedUrl, uploadUrl } = await getMemexUploadUrl({
      config,
      cookie,
      contentLength: params.blob.size,
      contentType,
      fileName: fileKey,
    });
    const response = await fetch(uploadUrl, {
      method: "PUT",
      body: params.blob,
      headers: {
        "Cache-Control": "public, max-age=3600",
        "Content-Type": contentType,
      },
    });
    if (!response.ok) {
      throw new Error(`Upload failed: ${response.status}`);
    }
    return { url: hostedUrl };
  }
  if (!hasCustomS3Creds(credentials)) {
    throw new Error("No storage credentials configured");
  }
  const endpoint = new URL(prefixEndpoint(credentials.endpoint));
  const client = new S3Client({
    endpoint: {
      // URL.protocol includes the trailing ":"; the SDK wants it bare.
      protocol: endpoint.protocol.slice(0, -1) as "http" | "https",
      hostname: endpoint.host,
      path: endpoint.pathname || "/",
    },
    region: storageConfig.region || "us-east-1",
    credentials: {
      accessKeyId: credentials.accessKeyId,
      secretAccessKey: credentials.secretAccessKey,
    },
    forcePathStyle: true,
  });
  const headers: Record<string, string> = {
    "Cache-Control": "public, max-age=3600",
    "Content-Type": contentType,
    "x-amz-acl": "public-read",
  };
  const command = new PutObjectCommand({
    Bucket: storageConfig.currentBucket,
    Key: fileKey,
    ContentType: headers["Content-Type"],
    CacheControl: headers["Cache-Control"],
    ACL: "public-read",
  });
  // The headers are folded into the signature so the PUT must (for hosts
  // that check them) send the same values.
  const signedUrl = await getSignedUrl(client, command, {
    expiresIn: 3600,
    signableHeaders: new Set(Object.keys(headers)),
  });
  // NOTE(review): headers are only sent for DigitalOcean Spaces;
  // presumably other providers accept the PUT without them — confirm.
  const response = await fetch(signedUrl, {
    method: "PUT",
    body: params.blob,
    headers: signedUrl.includes("digitaloceanspaces.com") ? headers : undefined,
  });
  if (!response.ok) {
    throw new Error(`Upload failed: ${response.status}`);
  }
  // Prefer the configured public base URL; otherwise strip the query
  // (signature) from the presigned URL.
  const publicUrl = storageConfig.publicUrlBase
    ? new URL(fileKey, storageConfig.publicUrlBase).toString()
    : signedUrl.split("?")[0];
  return { url: publicUrl };
}

View File

@ -9,15 +9,15 @@ vi.mock("openclaw/plugin-sdk/infra-runtime", async (importOriginal) => {
};
});
// Mock @tloncorp/api
vi.mock("@tloncorp/api", () => ({
// Mock the local Tlon upload seam.
vi.mock("../tlon-api.js", () => ({
uploadFile: vi.fn(),
}));
describe("uploadImageFromUrl", () => {
async function loadUploadMocks() {
const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/infra-runtime");
const { uploadFile } = await import("@tloncorp/api");
const { uploadFile } = await import("../tlon-api.js");
const { uploadImageFromUrl } = await import("./upload.js");
return {
mockFetch: vi.mocked(fetchWithSsrFGuard),

View File

@ -1,8 +1,8 @@
/**
* Upload an image from a URL to Tlon storage.
*/
import { uploadFile } from "@tloncorp/api";
import { fetchWithSsrFGuard } from "openclaw/plugin-sdk/infra-runtime";
import { uploadFile } from "../tlon-api.js";
import { getDefaultSsrFPolicy } from "./context.js";
/**

View File

@ -0,0 +1,127 @@
import { afterEach, describe, expect, it } from "vitest";
import { VoiceCallConfigSchema, type VoiceCallConfig } from "./config.js";
import { CallManager } from "./manager.js";
import { createTestStorePath, FakeProvider } from "./manager.test-harness.js";
import type { WebhookContext, WebhookParseOptions } from "./types.js";
import { VoiceCallWebhookServer } from "./webhook.js";
// Builds a parsed VoiceCallConfig with an ephemeral webhook port (0) and
// per-test overrides; the `serve` section is merged shallowly so overrides
// only replace the keys they specify.
const createConfig = (overrides: Partial<VoiceCallConfig> = {}): VoiceCallConfig => {
  const parsed = VoiceCallConfigSchema.parse({
    enabled: true,
    provider: "plivo",
    fromNumber: "+15550000000",
    inboundPolicy: "disabled",
  });
  parsed.serve.port = 0;
  const mergedServe = { ...parsed.serve, ...(overrides.serve ?? {}) };
  return { ...parsed, ...overrides, serve: mergedServe };
};
// POSTs a form-encoded webhook body to the server under test, rewriting
// the port in `baseUrl` to the actually-bound port (servers listen on
// port 0, so the real port is only known after start).
async function postWebhookForm(server: VoiceCallWebhookServer, baseUrl: string, body: string) {
  const internal = server as unknown as { server?: { address?: () => unknown } };
  const boundAddress = internal.server?.address?.();
  const target = new URL(baseUrl);
  if (
    boundAddress &&
    typeof boundAddress === "object" &&
    "port" in boundAddress &&
    boundAddress.port
  ) {
    target.port = String(boundAddress.port);
  }
  return await fetch(target.toString(), {
    method: "POST",
    headers: { "content-type": "application/x-www-form-urlencoded" },
    body,
  });
}
// Test double: always verifies the webhook with a fixed request key and
// emits a single inbound "call.initiated" event, deduped on that key, so
// duplicate deliveries exercise the replay/dedupe path.
class RejectInboundReplayProvider extends FakeProvider {
  override verifyWebhook() {
    return { ok: true, verifiedRequestKey: "verified:req:reject-once" };
  }
  override parseWebhookEvent(_ctx: WebhookContext, options?: WebhookParseOptions) {
    return {
      statusCode: 200,
      events: [
        {
          id: "evt-reject-once",
          // Dedupe on the verified request key so a replayed delivery maps
          // to the same logical event.
          dedupeKey: options?.verifiedRequestKey,
          type: "call.initiated" as const,
          callId: "provider-inbound-1",
          providerCallId: "provider-inbound-1",
          timestamp: Date.now(),
          direction: "inbound" as const,
          from: "+15552222222",
          to: "+15550000000",
        },
      ],
    };
  }
}
// Variant whose hangup always fails (after recording the attempt), used to
// check that a failed hangup is not retried on webhook replay.
class RejectInboundReplayWithHangupFailureProvider extends RejectInboundReplayProvider {
  override async hangupCall(input: Parameters<FakeProvider["hangupCall"]>[0]): Promise<void> {
    this.hangupCalls.push(input);
    throw new Error("hangup failed");
  }
}
// Verifies the "hang up a rejected inbound call exactly once" invariant
// across duplicate webhook deliveries, including when the first hangup throws.
describe("Voice-call webhook hangup-once lifecycle", () => {
  afterEach(() => {
    // Each test uses an isolated store path, so only server cleanup is needed.
  });
  it("hangs up a rejected inbound replay only once across duplicate webhook delivery", async () => {
    const provider = new RejectInboundReplayProvider("plivo");
    const config = createConfig();
    const manager = new CallManager(config, createTestStorePath());
    await manager.initialize(provider, "https://example.com/voice/webhook");
    const server = new VoiceCallWebhookServer(config, manager, provider);
    try {
      const baseUrl = await server.start();
      // Same provider payload delivered twice simulates webhook replay.
      const first = await postWebhookForm(server, baseUrl, "CallSid=CA123&From=%2B15552222222");
      const second = await postWebhookForm(server, baseUrl, "CallSid=CA123&From=%2B15552222222");
      expect(first.status).toBe(200);
      expect(second.status).toBe(200);
      expect(provider.hangupCalls).toHaveLength(1);
      expect(provider.hangupCalls[0]).toEqual(
        expect.objectContaining({
          providerCallId: "provider-inbound-1",
          reason: "hangup-bot",
        }),
      );
      // The rejected call must not linger in the manager.
      expect(manager.getCallByProviderCallId("provider-inbound-1")).toBeUndefined();
    } finally {
      await server.stop();
    }
  });
  it("does not attempt a second hangup when replay arrives after the first hangup fails", async () => {
    const provider = new RejectInboundReplayWithHangupFailureProvider("plivo");
    const config = createConfig();
    const manager = new CallManager(config, createTestStorePath());
    await manager.initialize(provider, "https://example.com/voice/webhook");
    const server = new VoiceCallWebhookServer(config, manager, provider);
    try {
      const baseUrl = await server.start();
      const first = await postWebhookForm(server, baseUrl, "CallSid=CA123&From=%2B15552222222");
      const second = await postWebhookForm(server, baseUrl, "CallSid=CA123&From=%2B15552222222");
      expect(first.status).toBe(200);
      expect(second.status).toBe(200);
      // Exactly one (failed) hangup attempt; the replay does not retry it.
      expect(provider.hangupCalls).toHaveLength(1);
      expect(provider.hangupCalls[0]?.providerCallId).toBe("provider-inbound-1");
      expect(manager.getCallByProviderCallId("provider-inbound-1")).toBeUndefined();
    } finally {
      await server.stop();
    }
  });
});

View File

@ -1,6 +1,6 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
const XIAOMI_BASE_URL = "https://api.xiaomimimo.com/anthropic";
const XIAOMI_BASE_URL = "https://api.xiaomimimo.com/v1";
export const XIAOMI_DEFAULT_MODEL_ID = "mimo-v2-flash";
const XIAOMI_DEFAULT_CONTEXT_WINDOW = 262144;
const XIAOMI_DEFAULT_MAX_TOKENS = 8192;
@ -14,7 +14,7 @@ const XIAOMI_DEFAULT_COST = {
export function buildXiaomiProvider(): ModelProviderConfig {
return {
baseUrl: XIAOMI_BASE_URL,
api: "anthropic-messages",
api: "openai-completions",
models: [
{
id: XIAOMI_DEFAULT_MODEL_ID,
@ -25,6 +25,24 @@ export function buildXiaomiProvider(): ModelProviderConfig {
contextWindow: XIAOMI_DEFAULT_CONTEXT_WINDOW,
maxTokens: XIAOMI_DEFAULT_MAX_TOKENS,
},
{
id: "mimo-v2-pro",
name: "Xiaomi MiMo V2 Pro",
reasoning: true,
input: ["text"],
cost: XIAOMI_DEFAULT_COST,
contextWindow: 1048576,
maxTokens: 32000,
},
{
id: "mimo-v2-omni",
name: "Xiaomi MiMo V2 Omni",
reasoning: true,
input: ["text", "image"],
cost: XIAOMI_DEFAULT_COST,
contextWindow: XIAOMI_DEFAULT_CONTEXT_WINDOW,
maxTokens: 32000,
},
],
};
}

View File

@ -0,0 +1,297 @@
import { createServer, type RequestListener } from "node:http";
import type { AddressInfo } from "node:net";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createEmptyPluginRegistry } from "../../../src/plugins/registry.js";
import { setActivePluginRegistry } from "../../../src/plugins/runtime.js";
import { createPluginRuntimeMock } from "../../../test/helpers/extensions/plugin-runtime-mock.js";
import type { OpenClawConfig, PluginRuntime } from "../runtime-api.js";
import type { ResolvedZaloAccount } from "./accounts.js";
import { clearZaloWebhookSecurityStateForTest, monitorZaloProvider } from "./monitor.js";
// Hoisted mocks for every Zalo Bot API call the monitor makes, so the
// pairing lifecycle tests run without network access. getUpdates never
// resolves: webhook mode must not fall back to long polling.
const setWebhookMock = vi.hoisted(() => vi.fn(async () => ({ ok: true, result: { url: "" } })));
const deleteWebhookMock = vi.hoisted(() => vi.fn(async () => ({ ok: true, result: { url: "" } })));
const getWebhookInfoMock = vi.hoisted(() => vi.fn(async () => ({ ok: true, result: { url: "" } })));
const getUpdatesMock = vi.hoisted(() => vi.fn(() => new Promise(() => {})));
const sendChatActionMock = vi.hoisted(() => vi.fn(async () => ({ ok: true })));
const sendMessageMock = vi.hoisted(() =>
  vi.fn(async () => ({ ok: true, result: { message_id: "pairing-zalo-1" } })),
);
const sendPhotoMock = vi.hoisted(() => vi.fn(async () => ({ ok: true })));
const getZaloRuntimeMock = vi.hoisted(() => vi.fn());
// Keep the real module but override the network-touching entry points.
vi.mock("./api.js", async (importOriginal) => {
  const actual = await importOriginal<typeof import("./api.js")>();
  return {
    ...actual,
    deleteWebhook: deleteWebhookMock,
    getUpdates: getUpdatesMock,
    getWebhookInfo: getWebhookInfoMock,
    sendChatAction: sendChatActionMock,
    sendMessage: sendMessageMock,
    sendPhoto: sendPhotoMock,
    setWebhook: setWebhookMock,
  };
});
// The runtime itself is injected per-test via getZaloRuntimeMock.
vi.mock("./runtime.js", () => ({
  getZaloRuntime: getZaloRuntimeMock,
}));
// Starts an ephemeral HTTP server wrapping `handler`, runs `fn` against
// its base URL, and always closes the server afterwards.
async function withServer(handler: RequestListener, fn: (baseUrl: string) => Promise<void>) {
  const httpServer = createServer(handler);
  await new Promise<void>((ready) => httpServer.listen(0, "127.0.0.1", () => ready()));
  const bound = httpServer.address() as AddressInfo | null;
  if (bound === null) {
    throw new Error("missing server address");
  }
  try {
    await fn(`http://127.0.0.1:${bound.port}`);
  } finally {
    await new Promise<void>((closed) => httpServer.close(() => closed()));
  }
}
// Minimal OpenClaw config enabling one Zalo account in "pairing" DM mode.
// The cast is intentional: tests only populate the fields the monitor reads.
function createLifecycleConfig(): OpenClawConfig {
  return {
    channels: {
      zalo: {
        enabled: true,
        accounts: {
          "acct-zalo-pairing": {
            enabled: true,
            webhookUrl: "https://example.com/hooks/zalo",
            webhookSecret: "supersecret", // pragma: allowlist secret
            dmPolicy: "pairing",
            allowFrom: [],
          },
        },
      },
    },
  } as OpenClawConfig;
}
// Resolved account matching createLifecycleConfig; the cast keeps the
// fixture limited to the fields the monitor actually consumes.
function createLifecycleAccount(): ResolvedZaloAccount {
  return {
    accountId: "acct-zalo-pairing",
    enabled: true,
    token: "zalo-token",
    tokenSource: "config",
    config: {
      webhookUrl: "https://example.com/hooks/zalo",
      webhookSecret: "supersecret", // pragma: allowlist secret
      dmPolicy: "pairing",
      allowFrom: [],
    },
  } as ResolvedZaloAccount;
}
// Log/error sinks standing in for the provider runtime; tests inspect the
// recorded calls.
function createRuntimeEnv() {
  const log = vi.fn<(message: string) => void>();
  const error = vi.fn<(message: string) => void>();
  return { log, error };
}
// Builds a minimal inbound Zalo DM text payload from an unauthorized user,
// keyed by `messageId` so replay tests can reuse the exact same payload.
function createTextUpdate(messageId: string) {
  const nowSeconds = Math.floor(Date.now() / 1000);
  return {
    event_name: "message.text.received",
    message: {
      from: { id: "user-unauthorized", name: "Unauthorized User" },
      chat: { id: "dm-pairing-1", chat_type: "PRIVATE" as const },
      message_id: messageId,
      date: nowSeconds,
      text: "hello from zalo",
    },
  };
}
// Yields to the microtask queue and the timer loop several times so queued
// webhook processing can finish before assertions run.
async function settleAsyncWork(): Promise<void> {
  const rounds = 6;
  for (let round = 0; round < rounds; round += 1) {
    await Promise.resolve();
    await new Promise((tick) => setTimeout(tick, 0));
  }
}
// POSTs a JSON webhook payload with the shared-secret header attached.
async function postWebhookUpdate(params: {
  baseUrl: string;
  path: string;
  secret: string;
  payload: Record<string, unknown>;
}) {
  const { baseUrl, path, secret, payload } = params;
  const url = `${baseUrl}${path}`;
  return await fetch(url, {
    method: "POST",
    headers: {
      "content-type": "application/json",
      "x-bot-api-secret-token": secret,
    },
    body: JSON.stringify(payload),
  });
}
// Verifies pairing replies are emitted exactly once per inbound message
// (even under webhook replay) and that pairing reads/writes are scoped to
// the account ID.
describe("Zalo pairing lifecycle", () => {
  const readAllowFromStoreMock = vi.fn(async () => [] as string[]);
  const upsertPairingRequestMock = vi.fn(async () => ({ code: "PAIRCODE", created: true }));
  beforeEach(() => {
    vi.clearAllMocks();
    clearZaloWebhookSecurityStateForTest();
    getZaloRuntimeMock.mockReturnValue(
      createPluginRuntimeMock({
        channel: {
          pairing: {
            readAllowFromStore:
              readAllowFromStoreMock as unknown as PluginRuntime["channel"]["pairing"]["readAllowFromStore"],
            upsertPairingRequest:
              upsertPairingRequestMock as unknown as PluginRuntime["channel"]["pairing"]["upsertPairingRequest"],
          },
          commands: {
            shouldComputeCommandAuthorized: vi.fn(() => false),
            resolveCommandAuthorizedFromAuthorizers: vi.fn(() => false),
          },
        },
      }),
    );
  });
  afterEach(() => {
    setActivePluginRegistry(createEmptyPluginRegistry());
  });
  it("emits one pairing reply across duplicate webhook replay and scopes reads and writes to accountId", async () => {
    const registry = createEmptyPluginRegistry();
    setActivePluginRegistry(registry);
    const abort = new AbortController();
    const runtime = createRuntimeEnv();
    const run = monitorZaloProvider({
      token: "zalo-token",
      account: createLifecycleAccount(),
      config: createLifecycleConfig(),
      runtime,
      abortSignal: abort.signal,
      useWebhook: true,
      webhookUrl: "https://example.com/hooks/zalo",
      webhookSecret: "supersecret",
    });
    // Wait for the monitor to register its webhook and HTTP route.
    await vi.waitFor(() => {
      expect(setWebhookMock).toHaveBeenCalledTimes(1);
      expect(registry.httpRoutes).toHaveLength(1);
    });
    const route = registry.httpRoutes[0];
    if (!route) {
      throw new Error("missing plugin HTTP route");
    }
    await withServer(
      (req, res) => route.handler(req, res),
      async (baseUrl) => {
        // Identical payload twice simulates webhook replay.
        const payload = createTextUpdate(`zalo-pairing-${Date.now()}`);
        const first = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        const second = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        expect(first.status).toBe(200);
        expect(second.status).toBe(200);
        await settleAsyncWork();
      },
    );
    // Pairing store access happens once and is scoped to the account.
    expect(readAllowFromStoreMock).toHaveBeenCalledTimes(1);
    expect(readAllowFromStoreMock).toHaveBeenCalledWith(
      expect.objectContaining({
        channel: "zalo",
        accountId: "acct-zalo-pairing",
      }),
    );
    expect(upsertPairingRequestMock).toHaveBeenCalledTimes(1);
    expect(upsertPairingRequestMock).toHaveBeenCalledWith(
      expect.objectContaining({
        channel: "zalo",
        accountId: "acct-zalo-pairing",
        id: "user-unauthorized",
      }),
    );
    // Exactly one pairing reply containing the pairing code.
    expect(sendMessageMock).toHaveBeenCalledTimes(1);
    expect(sendMessageMock).toHaveBeenCalledWith(
      "zalo-token",
      expect.objectContaining({
        chat_id: "dm-pairing-1",
        text: expect.stringContaining("PAIRCODE"),
      }),
      undefined,
    );
    abort.abort();
    await run;
  });
  it("does not emit a second pairing reply when replay arrives after the first send fails", async () => {
    sendMessageMock.mockRejectedValueOnce(new Error("pairing send failed"));
    const registry = createEmptyPluginRegistry();
    setActivePluginRegistry(registry);
    const abort = new AbortController();
    const runtime = createRuntimeEnv();
    const run = monitorZaloProvider({
      token: "zalo-token",
      account: createLifecycleAccount(),
      config: createLifecycleConfig(),
      runtime,
      abortSignal: abort.signal,
      useWebhook: true,
      webhookUrl: "https://example.com/hooks/zalo",
      webhookSecret: "supersecret",
    });
    await vi.waitFor(() => {
      expect(setWebhookMock).toHaveBeenCalledTimes(1);
      expect(registry.httpRoutes).toHaveLength(1);
    });
    const route = registry.httpRoutes[0];
    if (!route) {
      throw new Error("missing plugin HTTP route");
    }
    await withServer(
      (req, res) => route.handler(req, res),
      async (baseUrl) => {
        const payload = createTextUpdate(`zalo-pairing-retry-${Date.now()}`);
        const first = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        // Let the failed send settle before replaying.
        await settleAsyncWork();
        const replay = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        expect(first.status).toBe(200);
        expect(replay.status).toBe(200);
        await settleAsyncWork();
      },
    );
    // One pairing upsert, one (failed) send attempt — the replay is deduped.
    expect(upsertPairingRequestMock).toHaveBeenCalledTimes(1);
    expect(sendMessageMock).toHaveBeenCalledTimes(1);
    expect(runtime.error).not.toHaveBeenCalled();
    abort.abort();
    await run;
  });
});

View File

@ -0,0 +1,322 @@
import { createServer, type RequestListener } from "node:http";
import type { AddressInfo } from "node:net";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createEmptyPluginRegistry } from "../../../src/plugins/registry.js";
import { setActivePluginRegistry } from "../../../src/plugins/runtime.js";
import { createPluginRuntimeMock } from "../../../test/helpers/extensions/plugin-runtime-mock.js";
import type { OpenClawConfig, PluginRuntime } from "../runtime-api.js";
import type { ResolvedZaloAccount } from "./accounts.js";
import { clearZaloWebhookSecurityStateForTest, monitorZaloProvider } from "./monitor.js";
// Hoisted mocks for the Zalo Bot API endpoints the monitor calls, keeping
// the reply lifecycle tests offline. getUpdates never resolves so the
// monitor stays in webhook mode instead of falling back to polling.
const setWebhookMock = vi.hoisted(() => vi.fn(async () => ({ ok: true, result: { url: "" } })));
const deleteWebhookMock = vi.hoisted(() => vi.fn(async () => ({ ok: true, result: { url: "" } })));
const getWebhookInfoMock = vi.hoisted(() => vi.fn(async () => ({ ok: true, result: { url: "" } })));
const getUpdatesMock = vi.hoisted(() => vi.fn(() => new Promise(() => {})));
const sendChatActionMock = vi.hoisted(() => vi.fn(async () => ({ ok: true })));
const sendMessageMock = vi.hoisted(() =>
  vi.fn(async () => ({ ok: true, result: { message_id: "reply-zalo-1" } })),
);
const sendPhotoMock = vi.hoisted(() => vi.fn(async () => ({ ok: true })));
const getZaloRuntimeMock = vi.hoisted(() => vi.fn());
// Preserve the real module, overriding only network entry points.
vi.mock("./api.js", async (importOriginal) => {
  const actual = await importOriginal<typeof import("./api.js")>();
  return {
    ...actual,
    deleteWebhook: deleteWebhookMock,
    getUpdates: getUpdatesMock,
    getWebhookInfo: getWebhookInfoMock,
    sendChatAction: sendChatActionMock,
    sendMessage: sendMessageMock,
    sendPhoto: sendPhotoMock,
    setWebhook: setWebhookMock,
  };
});
// Runtime is injected per-test through getZaloRuntimeMock.
vi.mock("./runtime.js", () => ({
  getZaloRuntime: getZaloRuntimeMock,
}));
// Runs `fn` against an ephemeral HTTP server backed by `handler`;
// guarantees the server is closed when `fn` finishes or throws.
async function withServer(handler: RequestListener, fn: (baseUrl: string) => Promise<void>) {
  const server = createServer(handler);
  await new Promise<void>((ready) => {
    server.listen(0, "127.0.0.1", () => ready());
  });
  const info = server.address() as AddressInfo | null;
  if (!info) {
    throw new Error("missing server address");
  }
  const baseUrl = `http://127.0.0.1:${info.port}`;
  try {
    await fn(baseUrl);
  } finally {
    await new Promise<void>((done) => {
      server.close(() => done());
    });
  }
}
// Minimal OpenClaw config with a single Zalo account in "open" DM mode.
// Cast is deliberate: only the fields the monitor reads are provided.
function createLifecycleConfig(): OpenClawConfig {
  return {
    channels: {
      zalo: {
        enabled: true,
        accounts: {
          "acct-zalo-lifecycle": {
            enabled: true,
            webhookUrl: "https://example.com/hooks/zalo",
            webhookSecret: "supersecret", // pragma: allowlist secret
            dmPolicy: "open",
          },
        },
      },
    },
  } as OpenClawConfig;
}
// Resolved account fixture matching createLifecycleConfig above; the cast
// limits the fixture to fields the monitor consumes.
function createLifecycleAccount(): ResolvedZaloAccount {
  return {
    accountId: "acct-zalo-lifecycle",
    enabled: true,
    token: "zalo-token",
    tokenSource: "config",
    config: {
      webhookUrl: "https://example.com/hooks/zalo",
      webhookSecret: "supersecret", // pragma: allowlist secret
      dmPolicy: "open",
    },
  } as ResolvedZaloAccount;
}
// Minimal runtime stub exposing inspectable log and error sinks.
function createRuntimeEnv() {
  const logSink = vi.fn<(message: string) => void>();
  const errorSink = vi.fn<(message: string) => void>();
  return { log: logSink, error: errorSink };
}
// Minimal inbound Zalo DM text payload; `messageId` keys the event so the
// exact payload can be replayed for dedupe tests.
function createTextUpdate(messageId: string) {
  const message = {
    from: { id: "user-1", name: "User One" },
    chat: { id: "dm-chat-1", chat_type: "PRIVATE" as const },
    message_id: messageId,
    date: Math.floor(Date.now() / 1000),
    text: "hello from zalo",
  };
  return { event_name: "message.text.received", message };
}
// Drains microtasks and zero-delay timers a few times so asynchronous
// webhook handling completes before the test asserts.
async function settleAsyncWork(): Promise<void> {
  let remaining = 6;
  while (remaining > 0) {
    remaining -= 1;
    await Promise.resolve();
    await new Promise((tick) => setTimeout(tick, 0));
  }
}
// Delivers a JSON webhook payload carrying the shared-secret header.
async function postWebhookUpdate(params: {
  baseUrl: string;
  path: string;
  secret: string;
  payload: Record<string, unknown>;
}) {
  const requestInit = {
    method: "POST",
    headers: {
      "content-type": "application/json",
      "x-bot-api-secret-token": params.secret,
    },
    body: JSON.stringify(params.payload),
  };
  return await fetch(`${params.baseUrl}${params.path}`, requestInit);
}
// Verifies that one accepted webhook event produces exactly one visible
// reply, including under duplicate delivery and post-send dispatch failure.
describe("Zalo reply-once lifecycle", () => {
  const finalizeInboundContextMock = vi.fn((ctx: Record<string, unknown>) => ctx);
  const recordInboundSessionMock = vi.fn(async () => undefined);
  const resolveAgentRouteMock = vi.fn(() => ({
    agentId: "main",
    channel: "zalo",
    accountId: "acct-zalo-lifecycle",
    sessionKey: "agent:main:zalo:direct:dm-chat-1",
    mainSessionKey: "agent:main:main",
    matchedBy: "default",
  }));
  const dispatchReplyWithBufferedBlockDispatcherMock = vi.fn();
  beforeEach(() => {
    vi.clearAllMocks();
    clearZaloWebhookSecurityStateForTest();
    getZaloRuntimeMock.mockReturnValue(
      createPluginRuntimeMock({
        channel: {
          routing: {
            resolveAgentRoute:
              resolveAgentRouteMock as unknown as PluginRuntime["channel"]["routing"]["resolveAgentRoute"],
          },
          reply: {
            finalizeInboundContext:
              finalizeInboundContextMock as unknown as PluginRuntime["channel"]["reply"]["finalizeInboundContext"],
            dispatchReplyWithBufferedBlockDispatcher:
              dispatchReplyWithBufferedBlockDispatcherMock as unknown as PluginRuntime["channel"]["reply"]["dispatchReplyWithBufferedBlockDispatcher"],
          },
          session: {
            recordInboundSession:
              recordInboundSessionMock as unknown as PluginRuntime["channel"]["session"]["recordInboundSession"],
          },
        },
      }),
    );
  });
  afterEach(() => {
    setActivePluginRegistry(createEmptyPluginRegistry());
  });
  it("routes one accepted webhook event to one visible reply across duplicate replay", async () => {
    dispatchReplyWithBufferedBlockDispatcherMock.mockImplementation(
      async ({ dispatcherOptions }) => {
        await dispatcherOptions.deliver({ text: "zalo reply once" });
      },
    );
    const registry = createEmptyPluginRegistry();
    setActivePluginRegistry(registry);
    const abort = new AbortController();
    const runtime = createRuntimeEnv();
    const run = monitorZaloProvider({
      token: "zalo-token",
      account: createLifecycleAccount(),
      config: createLifecycleConfig(),
      runtime,
      abortSignal: abort.signal,
      useWebhook: true,
      webhookUrl: "https://example.com/hooks/zalo",
      webhookSecret: "supersecret",
    });
    // Wait for webhook registration, then grab the registered route.
    await vi.waitFor(() => expect(setWebhookMock).toHaveBeenCalledTimes(1));
    expect(registry.httpRoutes).toHaveLength(1);
    const route = registry.httpRoutes[0];
    if (!route) {
      throw new Error("missing plugin HTTP route");
    }
    await withServer(
      (req, res) => route.handler(req, res),
      async (baseUrl) => {
        // Same payload twice simulates duplicate webhook delivery.
        const payload = createTextUpdate(`zalo-replay-${Date.now()}`);
        const first = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        const second = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        expect(first.status).toBe(200);
        expect(second.status).toBe(200);
        await settleAsyncWork();
      },
    );
    // Inbound context is finalized once with routing-scoped fields.
    expect(finalizeInboundContextMock).toHaveBeenCalledTimes(1);
    expect(finalizeInboundContextMock).toHaveBeenCalledWith(
      expect.objectContaining({
        AccountId: "acct-zalo-lifecycle",
        SessionKey: "agent:main:zalo:direct:dm-chat-1",
        MessageSid: expect.stringContaining("zalo-replay-"),
        From: "zalo:user-1",
        To: "zalo:dm-chat-1",
      }),
    );
    expect(recordInboundSessionMock).toHaveBeenCalledTimes(1);
    expect(recordInboundSessionMock).toHaveBeenCalledWith(
      expect.objectContaining({
        sessionKey: "agent:main:zalo:direct:dm-chat-1",
      }),
    );
    // Exactly one outbound message for the deduped event.
    expect(sendMessageMock).toHaveBeenCalledTimes(1);
    expect(sendMessageMock).toHaveBeenCalledWith(
      "zalo-token",
      expect.objectContaining({
        chat_id: "dm-chat-1",
        text: "zalo reply once",
      }),
      undefined,
    );
    abort.abort();
    await run;
  });
  it("does not emit a second visible reply when replay arrives after a post-send failure", async () => {
    let dispatchAttempts = 0;
    dispatchReplyWithBufferedBlockDispatcherMock.mockImplementation(
      async ({ dispatcherOptions }) => {
        dispatchAttempts += 1;
        // Deliver first, then fail after the send on the initial attempt.
        await dispatcherOptions.deliver({ text: "zalo reply after failure" });
        if (dispatchAttempts === 1) {
          throw new Error("post-send failure");
        }
      },
    );
    const registry = createEmptyPluginRegistry();
    setActivePluginRegistry(registry);
    const abort = new AbortController();
    const runtime = createRuntimeEnv();
    const run = monitorZaloProvider({
      token: "zalo-token",
      account: createLifecycleAccount(),
      config: createLifecycleConfig(),
      runtime,
      abortSignal: abort.signal,
      useWebhook: true,
      webhookUrl: "https://example.com/hooks/zalo",
      webhookSecret: "supersecret",
    });
    await vi.waitFor(() => expect(setWebhookMock).toHaveBeenCalledTimes(1));
    const route = registry.httpRoutes[0];
    if (!route) {
      throw new Error("missing plugin HTTP route");
    }
    await withServer(
      (req, res) => route.handler(req, res),
      async (baseUrl) => {
        const payload = createTextUpdate(`zalo-retry-${Date.now()}`);
        const first = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        // Let the failing dispatch settle before replaying.
        await settleAsyncWork();
        const replay = await postWebhookUpdate({
          baseUrl,
          path: "/hooks/zalo",
          secret: "supersecret",
          payload,
        });
        expect(first.status).toBe(200);
        expect(replay.status).toBe(200);
        await settleAsyncWork();
      },
    );
    // One dispatch, one visible send; replay is suppressed and the original
    // failure was surfaced through the runtime error sink.
    expect(dispatchReplyWithBufferedBlockDispatcherMock).toHaveBeenCalledTimes(1);
    expect(sendMessageMock).toHaveBeenCalledTimes(1);
    expect(runtime.error).toHaveBeenCalledWith(
      expect.stringContaining("Zalo webhook failed: Error: post-send failure"),
    );
    abort.abort();
    await run;
  });
});

View File

@ -47,3 +47,11 @@ if [ "${#format_files[@]}" -gt 0 ]; then
fi
git add -- "${files[@]}"
# This hook is also exercised from lightweight temp repos in tests, where the
# staged-file safety behavior matters but the full OpenClaw workspace does not
# exist. Only run the repo-wide gate inside a real checkout.
if [[ -f "$ROOT_DIR/package.json" ]] && [[ -f "$ROOT_DIR/pnpm-lock.yaml" ]]; then
cd "$ROOT_DIR"
pnpm check
fi

View File

@ -77,6 +77,14 @@
"types": "./dist/plugin-sdk/setup.d.ts",
"default": "./dist/plugin-sdk/setup.js"
},
"./plugin-sdk/setup-adapter-runtime": {
"types": "./dist/plugin-sdk/setup-adapter-runtime.d.ts",
"default": "./dist/plugin-sdk/setup-adapter-runtime.js"
},
"./plugin-sdk/setup-runtime": {
"types": "./dist/plugin-sdk/setup-runtime.d.ts",
"default": "./dist/plugin-sdk/setup-runtime.js"
},
"./plugin-sdk/channel-setup": {
"types": "./dist/plugin-sdk/channel-setup.d.ts",
"default": "./dist/plugin-sdk/channel-setup.js"

223
pnpm-lock.yaml generated
View File

@ -309,8 +309,8 @@ importers:
extensions/discord:
dependencies:
'@buape/carbon':
specifier: 0.0.0-beta-20260216184201
version: 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.8)(opusscript@0.1.1)
specifier: 0.0.0-beta-20260317045421
version: 0.0.0-beta-20260317045421(@discordjs/opus@0.10.0)(hono@4.12.8)(opusscript@0.1.1)
'@discordjs/voice':
specifier: ^0.19.2
version: 0.19.2(@discordjs/opus@0.10.0)(opusscript@0.1.1)
@ -533,9 +533,12 @@ importers:
extensions/tlon:
dependencies:
'@tloncorp/api':
specifier: git+https://github.com/tloncorp/api-beta.git#7eede1c1a756977b09f96aa14a92e2b06318ae87
version: https://codeload.github.com/tloncorp/api-beta/tar.gz/7eede1c1a756977b09f96aa14a92e2b06318ae87
'@aws-sdk/client-s3':
specifier: 3.1000.0
version: 3.1000.0
'@aws-sdk/s3-request-presigner':
specifier: 3.1000.0
version: 3.1000.0
'@tloncorp/tlon-skill':
specifier: 0.2.2
version: 0.2.2
@ -988,6 +991,9 @@ packages:
'@buape/carbon@0.0.0-beta-20260216184201':
resolution: {integrity: sha512-u5mgYcigfPVqT7D9gVTGd+3YSflTreQmrWog7ORbb0z5w9eT8ft4rJOdw9fGwr75zMu9kXpSBaAcY2eZoJFSdA==}
'@buape/carbon@0.0.0-beta-20260317045421':
resolution: {integrity: sha512-yM+r5iSxA/iG8CZ2VhK+EkcBQV+y45WLgF7kuczt2Ul1yixjXSCCcM80GppsklfUv7pqM4Dui+7w1WB3f5p7Kg==}
'@cacheable/memory@2.0.7':
resolution: {integrity: sha512-RbxnxAMf89Tp1dLhXMS7ceft/PGsDl1Ip7T20z5nZ+pwIAsQ1p2izPjVG69oCLv/jfQ7HDPHTWK0c9rcAWXN3A==}
@ -2896,34 +2902,18 @@ packages:
resolution: {integrity: sha512-FE3bZdEl62ojmy8x4FHqxq2+BuOHlcxiH5vaZ6aqHJr3AIZzwF5jfx8dEiU/X0a8RboyNDjmXjlbr8AdEyLgiA==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-browser@4.2.11':
resolution: {integrity: sha512-3rEpo3G6f/nRS7fQDsZmxw/ius6rnlIpz4UX6FlALEzz8JoSxFmdBt0SZnthis+km7sQo6q5/3e+UJcuQivoXA==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-browser@4.2.12':
resolution: {integrity: sha512-XUSuMxlTxV5pp4VpqZf6Sa3vT/Q75FVkLSpSSE3KkWBvAQWeuWt1msTv8fJfgA4/jcJhrbrbMzN1AC/hvPmm5A==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-config-resolver@4.3.11':
resolution: {integrity: sha512-XeNIA8tcP/GDWnnKkO7qEm/bg0B/bP9lvIXZBXcGZwZ+VYM8h8k9wuDvUODtdQ2Wcp2RcBkPTCSMmaniVHrMlA==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-config-resolver@4.3.12':
resolution: {integrity: sha512-7epsAZ3QvfHkngz6RXQYseyZYHlmWXSTPOfPmXkiS+zA6TBNo1awUaMFL9vxyXlGdoELmCZyZe1nQE+imbmV+Q==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-node@4.2.11':
resolution: {integrity: sha512-fzbCh18rscBDTQSCrsp1fGcclLNF//nJyhjldsEl/5wCYmgpHblv5JSppQAyQI24lClsFT0wV06N1Porn0IsEw==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-node@4.2.12':
resolution: {integrity: sha512-D1pFuExo31854eAvg89KMn9Oab/wEeJR6Buy32B49A9Ogdtx5fwZPqBHUlDzaCDpycTFk2+fSQgX689Qsk7UGA==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-universal@4.2.11':
resolution: {integrity: sha512-MJ7HcI+jEkqoWT5vp+uoVaAjBrmxBtKhZTeynDRG/seEjJfqyg3SiqMMqyPnAMzmIfLaeJ/uiuSDP/l9AnMy/Q==}
engines: {node: '>=18.0.0'}
'@smithy/eventstream-serde-universal@4.2.12':
resolution: {integrity: sha512-+yNuTiyBACxOJUTvbsNsSOfH9G9oKbaJE1lNL3YHpGcuucl6rPZMi3nrpehpVOVR2E07YqFFmtwpImtpzlouHQ==}
engines: {node: '>=18.0.0'}
@ -3104,10 +3094,6 @@ packages:
resolution: {integrity: sha512-1zopLDUEOwumjcHdJ1mwBHddubYF8GMQvstVCLC54Y46rqoHwlIU+8ZzUeaBcD+WCJHyDGSeZ2ml9YSe9aqcoQ==}
engines: {node: '>=18.0.0'}
'@smithy/util-stream@4.5.19':
resolution: {integrity: sha512-v4sa+3xTweL1CLO2UP0p7tvIMH/Rq1X4KKOxd568mpe6LSLMQCnDHs4uv7m3ukpl3HvcN2JH6jiCS0SNRXKP/w==}
engines: {node: '>=18.0.0'}
'@smithy/util-stream@4.5.20':
resolution: {integrity: sha512-4yXLm5n/B5SRBR2p8cZ90Sbv4zL4NKsgxdzCzp/83cXw2KxLEumt5p+GAVyRNZgQOSrzXn9ARpO0lUe8XSlSDw==}
engines: {node: '>=18.0.0'}
@ -3237,10 +3223,6 @@ packages:
resolution: {integrity: sha512-5Kc5CM2Ysn3vTTArBs2vESUt0AQiWZA86yc1TI3B+lxXmtEq133C1nxXNOgnzhrivdPZIh3zLj5gDnZjoLL5GA==}
engines: {node: '>=12.17.0'}
'@tloncorp/api@https://codeload.github.com/tloncorp/api-beta/tar.gz/7eede1c1a756977b09f96aa14a92e2b06318ae87':
resolution: {tarball: https://codeload.github.com/tloncorp/api-beta/tar.gz/7eede1c1a756977b09f96aa14a92e2b06318ae87}
version: 0.0.2
'@tloncorp/tlon-skill-darwin-arm64@0.2.2':
resolution: {integrity: sha512-R6RPBZKwOlhJm8BkPCbnhLJ9XKPCCp0a3nq1QUCT2bN4orp/IbKFaqGK2mjZsxzKT8aPPPnRqviqpGioDdItuA==}
cpu: [arm64]
@ -3468,9 +3450,6 @@ packages:
resolution: {integrity: sha512-N8/FHc/lmlMDCumMuTXyRHCxlov5KZY6unmJ9QR2GOw+OpROZMBsXYGwE+ZMtvN21ql9+Xb8KhGNBj08IrG3Wg==}
engines: {node: '>=16', npm: '>=8'}
'@urbit/nockjs@1.6.0':
resolution: {integrity: sha512-f2xCIxoYQh+bp/p6qztvgxnhGsnUwcrSSvW2CUKX7BPPVkDNppQCzCVPWo38TbqgChE7wh6rC1pm6YNCOyFlQA==}
'@vitest/browser-playwright@4.1.0':
resolution: {integrity: sha512-2RU7pZELY9/aVMLmABNy1HeZ4FX23FXGY1jRuHLHgWa2zaAE49aNW2GLzebW+BmbTZIKKyFF1QXvk7DEWViUCQ==}
peerDependencies:
@ -3633,10 +3612,6 @@ packages:
resolution: {integrity: sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==}
engines: {node: '>=14'}
any-ascii@0.3.3:
resolution: {integrity: sha512-8hm+zPrc1VnlxD5eRgMo9F9k2wEMZhbZVLKwA/sPKIt6ywuz7bI9uV/yb27uvc8fv8q6Wl2piJT51q1saKX0Jw==}
engines: {node: '>=12.20'}
any-base@1.1.0:
resolution: {integrity: sha512-uMgjozySS8adZZYePpaWs8cxB9/kdzmpX6SgJZ+wbz1K5eYk5QMYDVJaZKhxyIHUdnnJkfR7SVgStgH7LkGUyg==}
@ -3793,10 +3768,6 @@ packages:
bidi-js@1.0.3:
resolution: {integrity: sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==}
big-integer@1.6.52:
resolution: {integrity: sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg==}
engines: {node: '>=0.6'}
bignumber.js@9.3.1:
resolution: {integrity: sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==}
@ -3831,9 +3802,6 @@ packages:
resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
engines: {node: '>=8'}
browser-or-node@3.0.0:
resolution: {integrity: sha512-iczIdVJzGEYhP5DqQxYM9Hh7Ztpqqi+CXZpSmX8ALFs9ecXkQIeqRyM6TfxEfMVpwhl3dSuDvxdzzo9sUOIVBQ==}
bs58@6.0.0:
resolution: {integrity: sha512-PD0wEnEYg6ijszw/u8s+iI3H17cTymlrwkKhDhPZq+Sokl3AU4htyBFTjAeNAlCCmg0f53g6ih3jATyCKftTfw==}
@ -3846,9 +3814,6 @@ packages:
buffer-from@1.1.2:
resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}
buffer@6.0.3:
resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==}
bun-types@1.3.9:
resolution: {integrity: sha512-+UBWWOakIP4Tswh0Bt0QD0alpTY8cb5hvgiYeWCMet9YukHbzuruIEeXC2D7nMJPB12kbh8C7XJykSexEqGKJg==}
@ -4067,9 +4032,6 @@ packages:
resolution: {integrity: sha512-23XHcCF+coGYevirZceTVD7NdJOqVn+49IHyxgszm+JIiHLoB2TkmPtsYkNWT1pvRSGkc35L6NHs0yHkN2SumA==}
engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0}
date-fns@3.6.0:
resolution: {integrity: sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==}
debug@4.4.3:
resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==}
engines: {node: '>=6.0'}
@ -4298,9 +4260,6 @@ packages:
resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==}
engines: {node: '>=12.0.0'}
exponential-backoff@3.1.3:
resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==}
express-rate-limit@8.3.1:
resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==}
engines: {node: '>= 16'}
@ -4872,9 +4831,6 @@ packages:
koffi@2.15.2:
resolution: {integrity: sha512-r9tjJLVRSOhCRWdVyQlF3/Ugzeg13jlzS4czS82MAgLff4W+BcYOW7g8Y62t9O5JYjYOLAjAovAZDNlDfZNu+g==}
libphonenumber-js@1.12.38:
resolution: {integrity: sha512-vwzxmasAy9hZigxtqTbFEwp8ZdZ975TiqVDwj5bKx5sR+zi5ucUQy9mbVTkKM9GzqdLdxux/hTw2nmN5J7POMA==}
lie@3.3.0:
resolution: {integrity: sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==}
@ -5017,9 +4973,6 @@ packages:
lodash.pickby@4.6.0:
resolution: {integrity: sha512-AZV+GsS/6ckvPOVQPXSiFFacKvKB4kOQu6ynt9wz0F3LO4R9Ij4K1ddYsIytDpSgLz88JHd9P+oaLeej5/Sl7Q==}
lodash@4.17.23:
resolution: {integrity: sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==}
log-symbols@7.0.1:
resolution: {integrity: sha512-ja1E3yCr9i/0hmBVaM0bfwDjnGy8I/s6PP4DFp+yP+a+mrHO4Rm7DtmnqROTUkHIkqffC84YY7AeqX6oFk0WFg==}
engines: {node: '>=18'}
@ -6025,9 +5978,6 @@ packages:
sonic-boom@4.2.1:
resolution: {integrity: sha512-w6AxtubXa2wTXAUsZMMWERrsIRAdrK0Sc+FUytWvYAhBJLyuI4llrMIC1DtlNSdI99EI86KZum2MMq3EAZlF9Q==}
sorted-btree@1.8.1:
resolution: {integrity: sha512-395+XIP+wqNn3USkFSrNz7G3Ss/MXlZEqesxvzCRFwL14h6e8LukDHdLBePn5pwbm5OQ9vGu8mDyz2lLDIqamQ==}
source-map-js@1.2.1:
resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
engines: {node: '>=0.10.0'}
@ -6454,10 +6404,6 @@ packages:
resolution: {integrity: sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A==}
engines: {node: ^20.17.0 || >=22.9.0}
validator@13.15.26:
resolution: {integrity: sha512-spH26xU080ydGggxRyR1Yhcbgx+j3y5jbNXk/8L+iRvdIEQ4uTRH2Sgf2dokud6Q4oAtsbNvJ1Ft+9xmm6IZcA==}
engines: {node: '>= 0.10'}
vary@1.1.2:
resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==}
engines: {node: '>= 0.8'}
@ -6805,33 +6751,33 @@ snapshots:
'@aws-sdk/util-user-agent-browser': 3.972.8
'@aws-sdk/util-user-agent-node': 3.973.7
'@smithy/config-resolver': 4.4.11
'@smithy/core': 3.23.11
'@smithy/eventstream-serde-browser': 4.2.11
'@smithy/eventstream-serde-config-resolver': 4.3.11
'@smithy/eventstream-serde-node': 4.2.11
'@smithy/core': 3.23.12
'@smithy/eventstream-serde-browser': 4.2.12
'@smithy/eventstream-serde-config-resolver': 4.3.12
'@smithy/eventstream-serde-node': 4.2.12
'@smithy/fetch-http-handler': 5.3.15
'@smithy/hash-node': 4.2.12
'@smithy/invalid-dependency': 4.2.12
'@smithy/middleware-content-length': 4.2.12
'@smithy/middleware-endpoint': 4.4.25
'@smithy/middleware-retry': 4.4.42
'@smithy/middleware-serde': 4.2.14
'@smithy/middleware-endpoint': 4.4.26
'@smithy/middleware-retry': 4.4.43
'@smithy/middleware-serde': 4.2.15
'@smithy/middleware-stack': 4.2.12
'@smithy/node-config-provider': 4.3.12
'@smithy/node-http-handler': 4.4.16
'@smithy/node-http-handler': 4.5.0
'@smithy/protocol-http': 5.3.12
'@smithy/smithy-client': 4.12.5
'@smithy/smithy-client': 4.12.6
'@smithy/types': 4.13.1
'@smithy/url-parser': 4.2.12
'@smithy/util-base64': 4.3.2
'@smithy/util-body-length-browser': 4.2.2
'@smithy/util-body-length-node': 4.2.3
'@smithy/util-defaults-mode-browser': 4.3.41
'@smithy/util-defaults-mode-node': 4.2.44
'@smithy/util-defaults-mode-browser': 4.3.42
'@smithy/util-defaults-mode-node': 4.2.45
'@smithy/util-endpoints': 3.3.3
'@smithy/util-middleware': 4.2.12
'@smithy/util-retry': 4.2.12
'@smithy/util-stream': 4.5.19
'@smithy/util-stream': 4.5.20
'@smithy/util-utf8': 4.2.2
tslib: 2.8.1
transitivePeerDependencies:
@ -7551,7 +7497,27 @@ snapshots:
dependencies:
css-tree: 3.2.1
'@buape/carbon@0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.8)(opusscript@0.1.1)':
'@buape/carbon@0.0.0-beta-20260216184201(hono@4.12.8)(opusscript@0.1.1)':
dependencies:
'@types/node': 25.5.0
discord-api-types: 0.38.37
optionalDependencies:
'@cloudflare/workers-types': 4.20260120.0
'@discordjs/voice': 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1)
'@hono/node-server': 1.19.10(hono@4.12.8)
'@types/bun': 1.3.9
'@types/ws': 8.18.1
ws: 8.19.0
transitivePeerDependencies:
- '@discordjs/opus'
- bufferutil
- ffmpeg-static
- hono
- node-opus
- opusscript
- utf-8-validate
'@buape/carbon@0.0.0-beta-20260317045421(@discordjs/opus@0.10.0)(hono@4.12.8)(opusscript@0.1.1)':
dependencies:
'@types/node': 25.5.0
discord-api-types: 0.38.37
@ -9620,7 +9586,7 @@ snapshots:
'@smithy/util-base64': 4.3.2
'@smithy/util-body-length-browser': 4.2.2
'@smithy/util-middleware': 4.2.12
'@smithy/util-stream': 4.5.19
'@smithy/util-stream': 4.5.20
'@smithy/util-utf8': 4.2.2
'@smithy/uuid': 1.1.2
tslib: 2.8.1
@ -9660,46 +9626,23 @@ snapshots:
'@smithy/util-hex-encoding': 4.2.2
tslib: 2.8.1
'@smithy/eventstream-serde-browser@4.2.11':
dependencies:
'@smithy/eventstream-serde-universal': 4.2.11
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/eventstream-serde-browser@4.2.12':
dependencies:
'@smithy/eventstream-serde-universal': 4.2.12
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/eventstream-serde-config-resolver@4.3.11':
dependencies:
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/eventstream-serde-config-resolver@4.3.12':
dependencies:
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/eventstream-serde-node@4.2.11':
dependencies:
'@smithy/eventstream-serde-universal': 4.2.11
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/eventstream-serde-node@4.2.12':
dependencies:
'@smithy/eventstream-serde-universal': 4.2.12
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/eventstream-serde-universal@4.2.11':
dependencies:
'@smithy/eventstream-codec': 4.2.11
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/eventstream-serde-universal@4.2.12':
dependencies:
'@smithy/eventstream-codec': 4.2.12
@ -9761,8 +9704,8 @@ snapshots:
'@smithy/middleware-endpoint@4.4.25':
dependencies:
'@smithy/core': 3.23.11
'@smithy/middleware-serde': 4.2.14
'@smithy/core': 3.23.12
'@smithy/middleware-serde': 4.2.15
'@smithy/node-config-provider': 4.3.12
'@smithy/shared-ini-file-loader': 4.4.7
'@smithy/types': 4.13.1
@ -9786,7 +9729,7 @@ snapshots:
'@smithy/node-config-provider': 4.3.12
'@smithy/protocol-http': 5.3.12
'@smithy/service-error-classification': 4.2.12
'@smithy/smithy-client': 4.12.5
'@smithy/smithy-client': 4.12.6
'@smithy/types': 4.13.1
'@smithy/util-middleware': 4.2.12
'@smithy/util-retry': 4.2.12
@ -9807,7 +9750,7 @@ snapshots:
'@smithy/middleware-serde@4.2.14':
dependencies:
'@smithy/core': 3.23.11
'@smithy/core': 3.23.12
'@smithy/protocol-http': 5.3.12
'@smithy/types': 4.13.1
tslib: 2.8.1
@ -9890,12 +9833,12 @@ snapshots:
'@smithy/smithy-client@4.12.5':
dependencies:
'@smithy/core': 3.23.11
'@smithy/middleware-endpoint': 4.4.25
'@smithy/core': 3.23.12
'@smithy/middleware-endpoint': 4.4.26
'@smithy/middleware-stack': 4.2.12
'@smithy/protocol-http': 5.3.12
'@smithy/types': 4.13.1
'@smithy/util-stream': 4.5.19
'@smithy/util-stream': 4.5.20
tslib: 2.8.1
'@smithy/smithy-client@4.12.6':
@ -9949,7 +9892,7 @@ snapshots:
'@smithy/util-defaults-mode-browser@4.3.41':
dependencies:
'@smithy/property-provider': 4.2.12
'@smithy/smithy-client': 4.12.5
'@smithy/smithy-client': 4.12.6
'@smithy/types': 4.13.1
tslib: 2.8.1
@ -9966,7 +9909,7 @@ snapshots:
'@smithy/credential-provider-imds': 4.2.12
'@smithy/node-config-provider': 4.3.12
'@smithy/property-provider': 4.2.12
'@smithy/smithy-client': 4.12.5
'@smithy/smithy-client': 4.12.6
'@smithy/types': 4.13.1
tslib: 2.8.1
@ -10001,17 +9944,6 @@ snapshots:
'@smithy/types': 4.13.1
tslib: 2.8.1
'@smithy/util-stream@4.5.19':
dependencies:
'@smithy/fetch-http-handler': 5.3.15
'@smithy/node-http-handler': 4.5.0
'@smithy/types': 4.13.1
'@smithy/util-base64': 4.3.2
'@smithy/util-buffer-from': 4.2.2
'@smithy/util-hex-encoding': 4.2.2
'@smithy/util-utf8': 4.2.2
tslib: 2.8.1
'@smithy/util-stream@4.5.20':
dependencies:
'@smithy/fetch-http-handler': 5.3.15
@ -10124,26 +10056,6 @@ snapshots:
'@tinyhttp/content-disposition@2.2.4': {}
'@tloncorp/api@https://codeload.github.com/tloncorp/api-beta/tar.gz/7eede1c1a756977b09f96aa14a92e2b06318ae87':
dependencies:
'@aws-sdk/client-s3': 3.1000.0
'@aws-sdk/s3-request-presigner': 3.1000.0
'@urbit/aura': 3.0.0
'@urbit/nockjs': 1.6.0
any-ascii: 0.3.3
big-integer: 1.6.52
browser-or-node: 3.0.0
buffer: 6.0.3
date-fns: 3.6.0
emoji-regex: 10.6.0
exponential-backoff: 3.1.3
libphonenumber-js: 1.12.38
lodash: 4.17.23
sorted-btree: 1.8.1
validator: 13.15.26
transitivePeerDependencies:
- aws-crt
'@tloncorp/tlon-skill-darwin-arm64@0.2.2':
optional: true
@ -10398,8 +10310,6 @@ snapshots:
'@urbit/aura@3.0.0': {}
'@urbit/nockjs@1.6.0': {}
'@vitest/browser-playwright@4.1.0(playwright@1.58.2)(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0)':
dependencies:
'@vitest/browser': 4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0)
@ -10605,8 +10515,6 @@ snapshots:
ansis@4.2.0: {}
any-ascii@0.3.3: {}
any-base@1.1.0:
optional: true
@ -10765,8 +10673,6 @@ snapshots:
dependencies:
require-from-string: 2.0.2
big-integer@1.6.52: {}
bignumber.js@9.3.1: {}
birpc@4.0.0: {}
@ -10807,8 +10713,6 @@ snapshots:
dependencies:
fill-range: 7.1.1
browser-or-node@3.0.0: {}
bs58@6.0.0:
dependencies:
base-x: 5.0.1
@ -10819,11 +10723,6 @@ snapshots:
buffer-from@1.1.2: {}
buffer@6.0.3:
dependencies:
base64-js: 1.5.1
ieee754: 1.2.1
bun-types@1.3.9:
dependencies:
'@types/node': 25.5.0
@ -11040,8 +10939,6 @@ snapshots:
transitivePeerDependencies:
- '@noble/hashes'
date-fns@3.6.0: {}
debug@4.4.3:
dependencies:
ms: 2.1.3
@ -11250,8 +11147,6 @@ snapshots:
expect-type@1.3.0: {}
exponential-backoff@3.1.3: {}
express-rate-limit@8.3.1(express@5.2.1):
dependencies:
express: 5.2.1
@ -12012,8 +11907,6 @@ snapshots:
koffi@2.15.2:
optional: true
libphonenumber-js@1.12.38: {}
lie@3.3.0:
dependencies:
immediate: 3.0.6
@ -12127,8 +12020,6 @@ snapshots:
lodash.pickby@4.6.0: {}
lodash@4.17.23: {}
log-symbols@7.0.1:
dependencies:
is-unicode-supported: 2.1.0
@ -12547,7 +12438,7 @@ snapshots:
dependencies:
'@agentclientprotocol/sdk': 0.16.1(zod@4.3.6)
'@aws-sdk/client-bedrock': 3.1009.0
'@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.8)(opusscript@0.1.1)
'@buape/carbon': 0.0.0-beta-20260216184201(hono@4.12.8)(opusscript@0.1.1)
'@clack/prompts': 1.1.0
'@discordjs/voice': 0.19.2(@discordjs/opus@0.10.0)(opusscript@0.1.1)
'@grammyjs/runner': 2.0.3(grammy@1.41.1)
@ -13406,8 +13297,6 @@ snapshots:
dependencies:
atomic-sleep: 1.0.0
sorted-btree@1.8.1: {}
source-map-js@1.2.1: {}
source-map-support@0.5.21:
@ -13798,8 +13687,6 @@ snapshots:
validate-npm-package-name@7.0.2: {}
validator@13.15.26: {}
vary@1.1.2: {}
vfile-message@4.0.3:

View File

@ -32,9 +32,9 @@ writeFileSync(
);
const DEFAULT_LIMITS_MB = {
help: 500,
statusJson: 925,
gatewayStatus: 900,
help: 100,
statusJson: 400,
gatewayStatus: 500,
};
const cases = [

View File

@ -21,6 +21,15 @@ RUN --mount=type=cache,id=openclaw-install-sh-nonroot-apt-cache,target=/var/cach
python3 \
sudo
# Preinstall the supported Node runtime in a cacheable build layer so the
# non-root smoke covers user-local npm prefixing and missing git without paying
# the full NodeSource bootstrap cost on every container run.
RUN --mount=type=cache,id=openclaw-install-sh-nonroot-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-install-sh-nonroot-apt-lists,target=/var/lib/apt,sharing=locked \
set -eux; \
curl -fsSL https://deb.nodesource.com/setup_24.x | bash -; \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends nodejs
RUN useradd -m -s /bin/bash app \
&& echo "app ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/app

View File

@ -15,6 +15,19 @@ if command -v git >/dev/null; then
exit 1
fi
echo "==> Pre-flight: ensure supported Node is already present"
node -e '
const version = process.versions.node.split(".").map(Number);
const ok =
version.length >= 2 &&
(version[0] > 22 || (version[0] === 22 && version[1] >= 16));
if (!ok) {
process.stderr.write(`unsupported node ${process.versions.node}\n`);
process.exit(1);
}
'
command -v npm >/dev/null
echo "==> Run installer (non-root user)"
curl -fsSL "$INSTALL_URL" | bash

616
scripts/docker/setup.sh Executable file
View File

@ -0,0 +1,616 @@
#!/usr/bin/env bash
set -euo pipefail

# Resolve the repository root relative to this script so it works from any
# working directory.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
EXTRA_COMPOSE_FILE="$ROOT_DIR/docker-compose.extra.yml"

# User-tunable settings; every OPENCLAW_* variable below may be overridden
# from the environment before invoking this script.
IMAGE_NAME="${OPENCLAW_IMAGE:-openclaw:local}"
EXTRA_MOUNTS="${OPENCLAW_EXTRA_MOUNTS:-}"
HOME_VOLUME_NAME="${OPENCLAW_HOME_VOLUME:-}"
RAW_SANDBOX_SETTING="${OPENCLAW_SANDBOX:-}"
# Normalized later: "1" when RAW_SANDBOX_SETTING is truthy, "" otherwise.
SANDBOX_ENABLED=""
DOCKER_SOCKET_PATH="${OPENCLAW_DOCKER_SOCKET:-}"
TIMEZONE="${OPENCLAW_TZ:-}"
# Print an error message to stderr and abort the whole script.
fail() {
  printf 'ERROR: %s\n' "$*" >&2
  exit 1
}
# Abort with a diagnostic unless the named external command is on PATH.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "Missing dependency: $1" >&2
  exit 1
}
# Succeed when the argument is a common "enabled" flag value
# (1/true/yes/on, case-insensitive); fail for anything else, including "".
is_truthy_value() {
  local normalized
  normalized="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')"
  case "$normalized" in
    1 | true | yes | on) return 0 ;;
  esac
  return 1
}
# Extract gateway.auth.token from $OPENCLAW_CONFIG_DIR/openclaw.json, if any.
# Prints the trimmed token to stdout; prints nothing (and returns 0) when the
# file is missing, unparsable, or has no non-empty token. Prefers python3 and
# falls back to node; silently does nothing when neither is available.
read_config_gateway_token() {
  local config_path="$OPENCLAW_CONFIG_DIR/openclaw.json"
  if [[ ! -f "$config_path" ]]; then
    return 0
  fi
  # Preferred path: python3. The heredoc exits quietly on any parse problem
  # so a malformed config never aborts setup.
  if command -v python3 >/dev/null 2>&1; then
    python3 - "$config_path" <<'PY'
import json
import sys
path = sys.argv[1]
try:
    with open(path, "r", encoding="utf-8") as f:
        cfg = json.load(f)
except Exception:
    raise SystemExit(0)
gateway = cfg.get("gateway")
if not isinstance(gateway, dict):
    raise SystemExit(0)
auth = gateway.get("auth")
if not isinstance(auth, dict):
    raise SystemExit(0)
token = auth.get("token")
if isinstance(token, str):
    token = token.strip()
    if token:
        print(token)
PY
    return 0
  fi
  # Fallback: node, with the same tolerant error handling.
  if command -v node >/dev/null 2>&1; then
    node - "$config_path" <<'NODE'
const fs = require("node:fs");
const configPath = process.argv[2];
try {
  const cfg = JSON.parse(fs.readFileSync(configPath, "utf8"));
  const token = cfg?.gateway?.auth?.token;
  if (typeof token === "string" && token.trim().length > 0) {
    process.stdout.write(token.trim());
  }
} catch {
  // Keep docker-setup resilient when config parsing fails.
}
NODE
  fi
}
# Scan a dotenv-style file for OPENCLAW_GATEWAY_TOKEN assignments and print
# the value of the LAST matching line (CRLF line endings tolerated). Prints
# nothing when the file is missing or no assignment is found.
read_env_gateway_token() {
  local file="$1"
  local current=""
  local found=""
  [[ -f "$file" ]] || return 0
  while IFS= read -r current || [[ -n "$current" ]]; do
    current="${current%$'\r'}"
    case "$current" in
      OPENCLAW_GATEWAY_TOKEN=*)
        found="${current#OPENCLAW_GATEWAY_TOKEN=}"
        ;;
    esac
  done <"$file"
  [[ -z "$found" ]] || printf '%s' "$found"
}
# Seed gateway.controlUi.allowedOrigins when the gateway binds beyond
# loopback. An existing non-empty allowlist is left untouched; otherwise the
# local control-UI origin (http://127.0.0.1:<port>) is written. No-op for
# loopback binds. Reads OPENCLAW_GATEWAY_BIND/PORT and COMPOSE_ARGS globals.
ensure_control_ui_allowed_origins() {
  if [[ "${OPENCLAW_GATEWAY_BIND}" == "loopback" ]]; then
    return 0
  fi
  local allowed_origin_json
  local current_allowed_origins
  allowed_origin_json="$(printf '["http://127.0.0.1:%s"]' "$OPENCLAW_GATEWAY_PORT")"
  # Read the current value via the CLI; tolerate failure (e.g. first run).
  current_allowed_origins="$(
    docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
      config get gateway.controlUi.allowedOrigins 2>/dev/null || true
  )"
  # Strip carriage returns from the captured output before comparing.
  current_allowed_origins="${current_allowed_origins//$'\r'/}"
  # "null" / "[]" are treated the same as unset: not yet configured.
  if [[ -n "$current_allowed_origins" && "$current_allowed_origins" != "null" && "$current_allowed_origins" != "[]" ]]; then
    echo "Control UI allowlist already configured; leaving gateway.controlUi.allowedOrigins unchanged."
    return 0
  fi
  docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
    config set gateway.controlUi.allowedOrigins "$allowed_origin_json" --strict-json >/dev/null
  echo "Set gateway.controlUi.allowedOrigins to $allowed_origin_json for non-loopback bind."
}
# Pin gateway.mode=local and gateway.bind=$OPENCLAW_GATEWAY_BIND in the
# container's config via the CLI service. Uses COMPOSE_ARGS from the caller.
sync_gateway_mode_and_bind() {
  docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
    config set gateway.mode local >/dev/null
  docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
    config set gateway.bind "$OPENCLAW_GATEWAY_BIND" >/dev/null
  echo "Pinned gateway.mode=local and gateway.bind=$OPENCLAW_GATEWAY_BIND for Docker setup."
}
# Succeed when the value embeds a newline, carriage return, or tab —
# characters that could corrupt the generated YAML/env files.
contains_disallowed_chars() {
  local candidate="$1"
  case "$candidate" in
    *$'\n'* | *$'\r'* | *$'\t'*) return 0 ;;
    *) return 1 ;;
  esac
}
# A timezone name is valid when /usr/share/zoneinfo contains a matching
# non-directory entry (bare region directories such as "Asia" are rejected).
is_valid_timezone() {
  local tz="$1"
  local entry="/usr/share/zoneinfo/$tz"
  [[ -e "$entry" ]] && [[ ! -d "$entry" ]]
}
# Validate a host path destined for a bind mount. $1 is a label used in
# error messages, $2 the value. Aborts (via fail) on empty values, control
# characters, or any whitespace.
validate_mount_path_value() {
  local label="$1"
  local value="$2"
  [[ -n "$value" ]] || fail "$label cannot be empty."
  if contains_disallowed_chars "$value"; then
    fail "$label contains unsupported control characters."
  fi
  [[ "$value" =~ [[:space:]] ]] && fail "$label cannot contain whitespace."
  return 0
}
# Named Docker volumes must start with an alphanumeric character and contain
# only alphanumerics, underscores, dots, and hyphens; abort otherwise.
validate_named_volume() {
  local name="$1"
  [[ "$name" =~ ^[A-Za-z0-9][A-Za-z0-9_.-]*$ ]] && return 0
  fail "OPENCLAW_HOME_VOLUME must match [A-Za-z0-9][A-Za-z0-9_.-]* when using a named volume."
}
# Validate one compose mount spec of the form source:target[:options].
# Control characters, whitespace, commas, and extra colons are rejected so a
# hostile value cannot inject YAML structure into the generated overlay.
validate_mount_spec() {
  local spec="$1"
  if contains_disallowed_chars "$spec"; then
    fail "OPENCLAW_EXTRA_MOUNTS entries cannot contain control characters."
  fi
  # Keep mount specs strict to avoid YAML structure injection.
  [[ "$spec" =~ ^[^[:space:],:]+:[^[:space:],:]+(:[^[:space:],:]+)?$ ]] ||
    fail "Invalid mount format '$spec'. Expected source:target[:options] without spaces."
}
# --- Pre-flight checks, input validation, and environment export ----------

require_cmd docker
if ! docker compose version >/dev/null 2>&1; then
  echo "Docker Compose not available (try: docker compose version)" >&2
  exit 1
fi

# Docker socket resolution order: explicit OPENCLAW_DOCKER_SOCKET, then a
# unix:// DOCKER_HOST, then the conventional default path.
if [[ -z "$DOCKER_SOCKET_PATH" && "${DOCKER_HOST:-}" == unix://* ]]; then
  DOCKER_SOCKET_PATH="${DOCKER_HOST#unix://}"
fi
if [[ -z "$DOCKER_SOCKET_PATH" ]]; then
  DOCKER_SOCKET_PATH="/var/run/docker.sock"
fi
if is_truthy_value "$RAW_SANDBOX_SETTING"; then
  SANDBOX_ENABLED="1"
fi

OPENCLAW_CONFIG_DIR="${OPENCLAW_CONFIG_DIR:-$HOME/.openclaw}"
OPENCLAW_WORKSPACE_DIR="${OPENCLAW_WORKSPACE_DIR:-$HOME/.openclaw/workspace}"
validate_mount_path_value "OPENCLAW_CONFIG_DIR" "$OPENCLAW_CONFIG_DIR"
validate_mount_path_value "OPENCLAW_WORKSPACE_DIR" "$OPENCLAW_WORKSPACE_DIR"

# A home volume containing "/" is treated as a bind-mount path; anything
# else must be a well-formed named Docker volume.
if [[ -n "$HOME_VOLUME_NAME" ]]; then
  if [[ "$HOME_VOLUME_NAME" == *"/"* ]]; then
    validate_mount_path_value "OPENCLAW_HOME_VOLUME" "$HOME_VOLUME_NAME"
  else
    validate_named_volume "$HOME_VOLUME_NAME"
  fi
fi
if contains_disallowed_chars "$EXTRA_MOUNTS"; then
  fail "OPENCLAW_EXTRA_MOUNTS cannot contain control characters."
fi
# The socket path only matters when the sandbox will actually mount it.
if [[ -n "$SANDBOX_ENABLED" ]]; then
  validate_mount_path_value "OPENCLAW_DOCKER_SOCKET" "$DOCKER_SOCKET_PATH"
fi
if [[ -n "$TIMEZONE" ]]; then
  if contains_disallowed_chars "$TIMEZONE"; then
    fail "OPENCLAW_TZ contains unsupported control characters."
  fi
  if [[ ! "$TIMEZONE" =~ ^[A-Za-z0-9/_+\-]+$ ]]; then
    fail "OPENCLAW_TZ must be a valid IANA timezone string (e.g. Asia/Shanghai)."
  fi
  if ! is_valid_timezone "$TIMEZONE"; then
    fail "OPENCLAW_TZ must match a timezone in /usr/share/zoneinfo (e.g. Asia/Shanghai)."
  fi
fi

mkdir -p "$OPENCLAW_CONFIG_DIR"
mkdir -p "$OPENCLAW_WORKSPACE_DIR"
# Seed directory tree eagerly so bind mounts work even on Docker Desktop/Windows
# where the container (even as root) cannot create new host subdirectories.
mkdir -p "$OPENCLAW_CONFIG_DIR/identity"
mkdir -p "$OPENCLAW_CONFIG_DIR/agents/main/agent"
mkdir -p "$OPENCLAW_CONFIG_DIR/agents/main/sessions"

# Export everything docker-compose variable interpolation consumes.
export OPENCLAW_CONFIG_DIR
export OPENCLAW_WORKSPACE_DIR
export OPENCLAW_GATEWAY_PORT="${OPENCLAW_GATEWAY_PORT:-18789}"
export OPENCLAW_BRIDGE_PORT="${OPENCLAW_BRIDGE_PORT:-18790}"
export OPENCLAW_GATEWAY_BIND="${OPENCLAW_GATEWAY_BIND:-lan}"
export OPENCLAW_IMAGE="$IMAGE_NAME"
export OPENCLAW_DOCKER_APT_PACKAGES="${OPENCLAW_DOCKER_APT_PACKAGES:-}"
export OPENCLAW_EXTENSIONS="${OPENCLAW_EXTENSIONS:-}"
export OPENCLAW_EXTRA_MOUNTS="$EXTRA_MOUNTS"
export OPENCLAW_HOME_VOLUME="$HOME_VOLUME_NAME"
export OPENCLAW_ALLOW_INSECURE_PRIVATE_WS="${OPENCLAW_ALLOW_INSECURE_PRIVATE_WS:-}"
export OPENCLAW_SANDBOX="$SANDBOX_ENABLED"
export OPENCLAW_DOCKER_SOCKET="$DOCKER_SOCKET_PATH"
export OPENCLAW_TZ="$TIMEZONE"

# Detect Docker socket GID for sandbox group_add.
# Tries GNU stat (-c) first, then BSD stat (-f); empty when neither works.
DOCKER_GID=""
if [[ -n "$SANDBOX_ENABLED" && -S "$DOCKER_SOCKET_PATH" ]]; then
  DOCKER_GID="$(stat -c '%g' "$DOCKER_SOCKET_PATH" 2>/dev/null || stat -f '%g' "$DOCKER_SOCKET_PATH" 2>/dev/null || echo "")"
fi
export DOCKER_GID

# Gateway token resolution order: explicit env var, existing config file,
# previously written .env, freshly generated random token.
if [[ -z "${OPENCLAW_GATEWAY_TOKEN:-}" ]]; then
  EXISTING_CONFIG_TOKEN="$(read_config_gateway_token || true)"
  if [[ -n "$EXISTING_CONFIG_TOKEN" ]]; then
    OPENCLAW_GATEWAY_TOKEN="$EXISTING_CONFIG_TOKEN"
    echo "Reusing gateway token from $OPENCLAW_CONFIG_DIR/openclaw.json"
  else
    DOTENV_GATEWAY_TOKEN="$(read_env_gateway_token "$ROOT_DIR/.env" || true)"
    if [[ -n "$DOTENV_GATEWAY_TOKEN" ]]; then
      OPENCLAW_GATEWAY_TOKEN="$DOTENV_GATEWAY_TOKEN"
      echo "Reusing gateway token from $ROOT_DIR/.env"
    elif command -v openssl >/dev/null 2>&1; then
      OPENCLAW_GATEWAY_TOKEN="$(openssl rand -hex 32)"
    else
      # No openssl available: fall back to Python's secrets module.
      OPENCLAW_GATEWAY_TOKEN="$(python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
)"
    fi
  fi
fi
export OPENCLAW_GATEWAY_TOKEN

COMPOSE_FILES=("$COMPOSE_FILE")
COMPOSE_ARGS=()
# Generate docker-compose.extra.yml with extra volume mounts for both the
# gateway and CLI services. $1 is the home volume (named volume or bind
# path; may be empty); remaining args are source:target[:options] mount
# specs. A top-level `volumes:` section is appended when the home volume is
# a named volume (contains no "/").
write_extra_compose() {
  local home_volume="$1"
  shift
  local mount
  local gateway_home_mount
  local gateway_config_mount
  local gateway_workspace_mount
  cat >"$EXTRA_COMPOSE_FILE" <<'YAML'
services:
  openclaw-gateway:
    volumes:
YAML
  if [[ -n "$home_volume" ]]; then
    # Mount the home volume first, then re-overlay the config and workspace
    # bind mounts so they still take precedence over the volume contents.
    gateway_home_mount="${home_volume}:/home/node"
    gateway_config_mount="${OPENCLAW_CONFIG_DIR}:/home/node/.openclaw"
    gateway_workspace_mount="${OPENCLAW_WORKSPACE_DIR}:/home/node/.openclaw/workspace"
    validate_mount_spec "$gateway_home_mount"
    validate_mount_spec "$gateway_config_mount"
    validate_mount_spec "$gateway_workspace_mount"
    printf '      - %s\n' "$gateway_home_mount" >>"$EXTRA_COMPOSE_FILE"
    printf '      - %s\n' "$gateway_config_mount" >>"$EXTRA_COMPOSE_FILE"
    printf '      - %s\n' "$gateway_workspace_mount" >>"$EXTRA_COMPOSE_FILE"
  fi
  for mount in "$@"; do
    validate_mount_spec "$mount"
    printf '      - %s\n' "$mount" >>"$EXTRA_COMPOSE_FILE"
  done
  cat >>"$EXTRA_COMPOSE_FILE" <<'YAML'
  openclaw-cli:
    volumes:
YAML
  # The CLI service gets exactly the same mount list as the gateway.
  if [[ -n "$home_volume" ]]; then
    printf '      - %s\n' "$gateway_home_mount" >>"$EXTRA_COMPOSE_FILE"
    printf '      - %s\n' "$gateway_config_mount" >>"$EXTRA_COMPOSE_FILE"
    printf '      - %s\n' "$gateway_workspace_mount" >>"$EXTRA_COMPOSE_FILE"
  fi
  for mount in "$@"; do
    validate_mount_spec "$mount"
    printf '      - %s\n' "$mount" >>"$EXTRA_COMPOSE_FILE"
  done
  # Named volumes (no "/") need a top-level declaration in the compose file.
  if [[ -n "$home_volume" && "$home_volume" != *"/"* ]]; then
    validate_named_volume "$home_volume"
    cat >>"$EXTRA_COMPOSE_FILE" <<YAML
volumes:
  ${home_volume}:
YAML
  fi
}
# When sandbox is requested, ensure Docker CLI build arg is set for local builds.
# Docker socket mount is deferred until sandbox prerequisites are verified.
if [[ -n "$SANDBOX_ENABLED" ]]; then
  if [[ -z "${OPENCLAW_INSTALL_DOCKER_CLI:-}" ]]; then
    export OPENCLAW_INSTALL_DOCKER_CLI=1
  fi
fi

# Split comma-separated OPENCLAW_EXTRA_MOUNTS into a trimmed array,
# discarding empty entries.
VALID_MOUNTS=()
if [[ -n "$EXTRA_MOUNTS" ]]; then
  IFS=',' read -r -a mounts <<<"$EXTRA_MOUNTS"
  for mount in "${mounts[@]}"; do
    # Trim leading, then trailing, whitespace.
    mount="${mount#"${mount%%[![:space:]]*}"}"
    mount="${mount%"${mount##*[![:space:]]}"}"
    if [[ -n "$mount" ]]; then
      VALID_MOUNTS+=("$mount")
    fi
  done
fi

# Only generate the extra compose overlay when there is something to mount.
if [[ -n "$HOME_VOLUME_NAME" || ${#VALID_MOUNTS[@]} -gt 0 ]]; then
  # Bash 3.2 + nounset treats "${array[@]}" on an empty array as unbound.
  if [[ ${#VALID_MOUNTS[@]} -gt 0 ]]; then
    write_extra_compose "$HOME_VOLUME_NAME" "${VALID_MOUNTS[@]}"
  else
    write_extra_compose "$HOME_VOLUME_NAME"
  fi
  COMPOSE_FILES+=("$EXTRA_COMPOSE_FILE")
fi

for compose_file in "${COMPOSE_FILES[@]}"; do
  COMPOSE_ARGS+=("-f" "$compose_file")
done
# Keep a base compose arg set without sandbox overlay so rollback paths can
# force a known-safe gateway service definition (no docker.sock mount).
BASE_COMPOSE_ARGS=("${COMPOSE_ARGS[@]}")

# Human-readable compose invocation echoed in the usage hints printed later.
COMPOSE_HINT="docker compose"
for compose_file in "${COMPOSE_FILES[@]}"; do
  COMPOSE_HINT+=" -f ${compose_file}"
done

ENV_FILE="$ROOT_DIR/.env"
# Insert or update KEY=VALUE lines in an env file: $1 is the file, the rest
# are variable names whose current shell values are written. Unrelated lines
# are preserved in place; existing keys are rewritten where they appear and
# missing keys are appended at the end.
# A space-delimited string tracks handled keys instead of an associative
# array so the function still runs on Bash 3.2 (macOS default).
upsert_env() {
  local target="$1"
  shift
  local -a wanted=("$@")
  local scratch
  scratch="$(mktemp)"
  local handled=" "
  local entry key name matched
  if [[ -f "$target" ]]; then
    while IFS= read -r entry || [[ -n "$entry" ]]; do
      key="${entry%%=*}"
      matched=false
      for name in "${wanted[@]}"; do
        if [[ "$key" == "$name" ]]; then
          printf '%s=%s\n' "$name" "${!name-}" >>"$scratch"
          handled="$handled$name "
          matched=true
          break
        fi
      done
      if [[ "$matched" == false ]]; then
        printf '%s\n' "$entry" >>"$scratch"
      fi
    done <"$target"
  fi
  for name in "${wanted[@]}"; do
    if [[ "$handled" != *" $name "* ]]; then
      printf '%s=%s\n' "$name" "${!name-}" >>"$scratch"
    fi
  done
  mv "$scratch" "$target"
}
# Persist the effective configuration so later `docker compose` invocations
# (and re-runs of this script) resolve the same settings from .env.
upsert_env "$ENV_FILE" \
  OPENCLAW_CONFIG_DIR \
  OPENCLAW_WORKSPACE_DIR \
  OPENCLAW_GATEWAY_PORT \
  OPENCLAW_BRIDGE_PORT \
  OPENCLAW_GATEWAY_BIND \
  OPENCLAW_GATEWAY_TOKEN \
  OPENCLAW_IMAGE \
  OPENCLAW_EXTRA_MOUNTS \
  OPENCLAW_HOME_VOLUME \
  OPENCLAW_DOCKER_APT_PACKAGES \
  OPENCLAW_EXTENSIONS \
  OPENCLAW_SANDBOX \
  OPENCLAW_DOCKER_SOCKET \
  DOCKER_GID \
  OPENCLAW_INSTALL_DOCKER_CLI \
  OPENCLAW_ALLOW_INSECURE_PRIVATE_WS \
  OPENCLAW_TZ

# Default image tag means "build locally"; any other tag is pulled.
if [[ "$IMAGE_NAME" == "openclaw:local" ]]; then
  echo "==> Building Docker image: $IMAGE_NAME"
  docker build \
    --build-arg "OPENCLAW_DOCKER_APT_PACKAGES=${OPENCLAW_DOCKER_APT_PACKAGES}" \
    --build-arg "OPENCLAW_EXTENSIONS=${OPENCLAW_EXTENSIONS}" \
    --build-arg "OPENCLAW_INSTALL_DOCKER_CLI=${OPENCLAW_INSTALL_DOCKER_CLI:-}" \
    -t "$IMAGE_NAME" \
    -f "$ROOT_DIR/Dockerfile" \
    "$ROOT_DIR"
else
  echo "==> Pulling Docker image: $IMAGE_NAME"
  if ! docker pull "$IMAGE_NAME"; then
    echo "ERROR: Failed to pull image $IMAGE_NAME. Please check the image name and your access permissions." >&2
    exit 1
  fi
fi

# Ensure bind-mounted data directories are writable by the container's `node`
# user (uid 1000). Host-created dirs inherit the host user's uid which may
# differ, causing EACCES when the container tries to mkdir/write.
# Running a brief root container to chown is the portable Docker idiom --
# it works regardless of the host uid and doesn't require host-side root.
echo ""
echo "==> Fixing data-directory permissions"
# Use -xdev to restrict chown to the config-dir mount only — without it,
# the recursive chown would cross into the workspace bind mount and rewrite
# ownership of all user project files on Linux hosts.
# After fixing the config dir, only the OpenClaw metadata subdirectory
# (.openclaw/) inside the workspace gets chowned, not the user's project files.
docker compose "${COMPOSE_ARGS[@]}" run --rm --user root --entrypoint sh openclaw-cli -c \
  'find /home/node/.openclaw -xdev -exec chown node:node {} +; \
[ -d /home/node/.openclaw/workspace/.openclaw ] && chown -R node:node /home/node/.openclaw/workspace/.openclaw || true'

echo ""
echo "==> Onboarding (interactive)"
echo "Docker setup pins Gateway mode to local."
echo "Gateway runtime bind comes from OPENCLAW_GATEWAY_BIND (default: lan)."
echo "Current runtime bind: $OPENCLAW_GATEWAY_BIND"
echo "Gateway token: $OPENCLAW_GATEWAY_TOKEN"
echo "Tailscale exposure: Off (use host-level tailnet/Tailscale setup separately)."
echo "Install Gateway daemon: No (managed by Docker Compose)"
echo ""
docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli onboard --mode local --no-install-daemon

echo ""
echo "==> Docker gateway defaults"
sync_gateway_mode_and_bind

echo ""
echo "==> Control UI origin allowlist"
ensure_control_ui_allowed_origins

# Print copy-pasteable commands for optional channel providers.
echo ""
echo "==> Provider setup (optional)"
echo "WhatsApp (QR):"
echo " ${COMPOSE_HINT} run --rm openclaw-cli channels login"
echo "Telegram (bot token):"
echo " ${COMPOSE_HINT} run --rm openclaw-cli channels add --channel telegram --token <token>"
echo "Discord (bot token):"
echo " ${COMPOSE_HINT} run --rm openclaw-cli channels add --channel discord --token <token>"
echo "Docs: https://docs.openclaw.ai/channels"

echo ""
echo "==> Starting gateway"
docker compose "${COMPOSE_ARGS[@]}" up -d openclaw-gateway
# --- Sandbox setup (opt-in via OPENCLAW_SANDBOX=1) ---
if [[ -n "$SANDBOX_ENABLED" ]]; then
  echo ""
  echo "==> Sandbox setup"
  # Build sandbox image if Dockerfile.sandbox exists.
  if [[ -f "$ROOT_DIR/Dockerfile.sandbox" ]]; then
    echo "Building sandbox image: openclaw-sandbox:bookworm-slim"
    docker build \
      -t "openclaw-sandbox:bookworm-slim" \
      -f "$ROOT_DIR/Dockerfile.sandbox" \
      "$ROOT_DIR"
  else
    echo "WARNING: Dockerfile.sandbox not found in $ROOT_DIR" >&2
    echo " Sandbox config will be applied but no sandbox image will be built." >&2
    echo " Agent exec may fail if the configured sandbox image does not exist." >&2
  fi
  # Defense-in-depth: verify Docker CLI in the running image before enabling
  # sandbox. This avoids claiming sandbox is enabled when the image cannot
  # launch sandbox containers.
  if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --entrypoint docker openclaw-gateway --version >/dev/null 2>&1; then
    echo "WARNING: Docker CLI not found inside the container image." >&2
    echo " Sandbox requires Docker CLI. Rebuild with --build-arg OPENCLAW_INSTALL_DOCKER_CLI=1" >&2
    echo " or use a local build (OPENCLAW_IMAGE=openclaw:local). Skipping sandbox setup." >&2
    SANDBOX_ENABLED=""
  fi
fi

# Apply sandbox config only if prerequisites are met.
if [[ -n "$SANDBOX_ENABLED" ]]; then
  # Mount Docker socket via a dedicated compose overlay. This overlay is
  # created only after sandbox prerequisites pass, so the socket is never
  # exposed when sandbox cannot actually run.
  if [[ -S "$DOCKER_SOCKET_PATH" ]]; then
    SANDBOX_COMPOSE_FILE="$ROOT_DIR/docker-compose.sandbox.yml"
    cat >"$SANDBOX_COMPOSE_FILE" <<YAML
services:
  openclaw-gateway:
    volumes:
      - ${DOCKER_SOCKET_PATH}:/var/run/docker.sock
YAML
    # group_add with the socket's GID lets the non-root container user talk
    # to the daemon; only emitted when a GID was detected earlier.
    if [[ -n "${DOCKER_GID:-}" ]]; then
      cat >>"$SANDBOX_COMPOSE_FILE" <<YAML
    group_add:
      - "${DOCKER_GID}"
YAML
    fi
    COMPOSE_ARGS+=("-f" "$SANDBOX_COMPOSE_FILE")
    echo "==> Sandbox: added Docker socket mount"
  else
    echo "WARNING: OPENCLAW_SANDBOX enabled but Docker socket not found at $DOCKER_SOCKET_PATH." >&2
    echo " Sandbox requires Docker socket access. Skipping sandbox setup." >&2
    SANDBOX_ENABLED=""
  fi
fi
if [[ -n "$SANDBOX_ENABLED" ]]; then
# Enable sandbox in OpenClaw config.
sandbox_config_ok=true
if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
config set agents.defaults.sandbox.mode "non-main" >/dev/null; then
echo "WARNING: Failed to set agents.defaults.sandbox.mode" >&2
sandbox_config_ok=false
fi
if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
config set agents.defaults.sandbox.scope "agent" >/dev/null; then
echo "WARNING: Failed to set agents.defaults.sandbox.scope" >&2
sandbox_config_ok=false
fi
if ! docker compose "${COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
config set agents.defaults.sandbox.workspaceAccess "none" >/dev/null; then
echo "WARNING: Failed to set agents.defaults.sandbox.workspaceAccess" >&2
sandbox_config_ok=false
fi
if [[ "$sandbox_config_ok" == true ]]; then
echo "Sandbox enabled: mode=non-main, scope=agent, workspaceAccess=none"
echo "Docs: https://docs.openclaw.ai/gateway/sandboxing"
# Restart gateway with sandbox compose overlay to pick up socket mount + config.
docker compose "${COMPOSE_ARGS[@]}" up -d openclaw-gateway
else
echo "WARNING: Sandbox config was partially applied. Check errors above." >&2
echo " Skipping gateway restart to avoid exposing Docker socket without a full sandbox policy." >&2
if ! docker compose "${BASE_COMPOSE_ARGS[@]}" run --rm --no-deps openclaw-cli \
config set agents.defaults.sandbox.mode "off" >/dev/null; then
echo "WARNING: Failed to roll back agents.defaults.sandbox.mode to off" >&2
else
echo "Sandbox mode rolled back to off due to partial sandbox config failure."
fi
if [[ -n "${SANDBOX_COMPOSE_FILE:-}" ]]; then
rm -f "$SANDBOX_COMPOSE_FILE"
fi
# Ensure gateway service definition is reset without sandbox overlay mount.
docker compose "${BASE_COMPOSE_ARGS[@]}" up -d --force-recreate openclaw-gateway
fi
else
# Keep reruns deterministic: if sandbox is not active for this run, reset
# persisted sandbox mode so future execs do not require docker.sock by stale
# config alone.
if ! docker compose "${COMPOSE_ARGS[@]}" run --rm openclaw-cli \
config set agents.defaults.sandbox.mode "off" >/dev/null; then
echo "WARNING: Failed to reset agents.defaults.sandbox.mode to off" >&2
fi
if [[ -f "$ROOT_DIR/docker-compose.sandbox.yml" ]]; then
rm -f "$ROOT_DIR/docker-compose.sandbox.yml"
fi
fi
echo ""
echo "Gateway running with host port mapping."
echo "Access from tailnet devices via the host's tailnet IP."
echo "Config: $OPENCLAW_CONFIG_DIR"
echo "Workspace: $OPENCLAW_WORKSPACE_DIR"
echo "Token: $OPENCLAW_GATEWAY_TOKEN"
echo ""
echo "Commands:"
echo " ${COMPOSE_HINT} logs -f openclaw-gateway"
echo " ${COMPOSE_HINT} exec openclaw-gateway node dist/index.js health --token \"$OPENCLAW_GATEWAY_TOKEN\""

View File

@ -9,6 +9,8 @@
"runtime",
"runtime-env",
"setup",
"setup-adapter-runtime",
"setup-runtime",
"channel-setup",
"setup-tools",
"config-runtime",

View File

@ -1,5 +1,5 @@
# OpenClaw gateway — Podman Quadlet (rootless)
# Installed by setup-podman.sh into openclaw's ~/.config/containers/systemd/
# Installed by scripts/podman/setup.sh into openclaw's ~/.config/containers/systemd/
# {{OPENCLAW_HOME}} is replaced at install time.
[Unit]

312
scripts/podman/setup.sh Executable file
View File

@ -0,0 +1,312 @@
#!/usr/bin/env bash
# One-time host setup for rootless OpenClaw in Podman: creates the openclaw
# user, builds the image, loads it into that user's Podman store, and installs
# the launch script. Run from repo root with sudo capability.
#
# Usage: ./scripts/podman/setup.sh [--quadlet|--container]
# --quadlet Install systemd Quadlet so the container runs as a user service
# --container Only install user + image + launch script; you start the container manually (default)
# Or set OPENCLAW_PODMAN_QUADLET=1 (or 0) to choose without a flag.
#
# After this, start the gateway manually:
# ./scripts/run-openclaw-podman.sh launch
# ./scripts/run-openclaw-podman.sh launch setup # onboarding wizard
# Or as the openclaw user: sudo -u openclaw /home/openclaw/run-openclaw-podman.sh
# If you used --quadlet, you can also: sudo systemctl --machine openclaw@ --user start openclaw.service
set -euo pipefail
OPENCLAW_USER="${OPENCLAW_PODMAN_USER:-openclaw}"
REPO_PATH="${OPENCLAW_REPO_PATH:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
RUN_SCRIPT_SRC="$REPO_PATH/scripts/run-openclaw-podman.sh"
QUADLET_TEMPLATE="$REPO_PATH/scripts/podman/openclaw.container.in"
# Abort the whole script with a clear message unless the named executable
# is available on PATH.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "Missing dependency: $1" >&2
  exit 1
}
# True when $1 is a non-empty path naming a real (non-symlink) directory
# that the current user can both write to and traverse.
is_writable_dir() {
  local candidate="$1"
  [[ -z "$candidate" ]] && return 1
  [[ -d "$candidate" ]] || return 1
  [[ -L "$candidate" ]] && return 1
  [[ -w "$candidate" && -x "$candidate" ]]
}
# True when $1 is safe to hold the exported image tarball:
#  - writable, traversable, a real (non-symlink) directory;
#  - not group/world-writable unless the sticky bit is set;
#  - owned by root when we are running as root.
# `stat -Lc` is GNU coreutils; where it fails (e.g. BSD/macOS stat) the
# output is empty and that particular check is skipped (best-effort).
is_safe_tmp_base() {
  local dir="$1"
  local mode=""
  local owner=""
  is_writable_dir "$dir" || return 1
  mode="$(stat -Lc '%a' "$dir" 2>/dev/null || true)"
  if [[ -n "$mode" ]]; then
    # Interpret the mode string as octal. 0022 = group+other write bits,
    # 01000 = sticky bit: world/group-writable without sticky is unsafe.
    local perm=$((8#$mode))
    if (( (perm & 0022) != 0 && (perm & 01000) == 0 )); then
      return 1
    fi
  fi
  if is_root; then
    owner="$(stat -Lc '%u' "$dir" 2>/dev/null || true)"
    if [[ -n "$owner" && "$owner" != "0" ]]; then
      return 1
    fi
  fi
  return 0
}
# Pick the base directory for the exported image tarball. Preference order:
# the caller's TMPDIR (non-root only), then /var/tmp, then /tmp; falls back
# to /tmp unconditionally when no candidate passes the safety check.
resolve_image_tmp_dir() {
  local candidate
  if ! is_root; then
    if is_safe_tmp_base "${TMPDIR:-}"; then
      printf '%s' "$TMPDIR"
      return 0
    fi
  fi
  for candidate in /var/tmp /tmp; do
    if is_safe_tmp_base "$candidate"; then
      printf '%s' "$candidate"
      return 0
    fi
  done
  printf '%s' "/tmp"
}
# True when the effective user is root (uid 0).
is_root() {
  [[ "$(id -u)" == "0" ]]
}
# Run a command with root privileges: via sudo when we are an ordinary
# user, directly when we are already root.
run_root() {
  if ! is_root; then
    sudo "$@"
  else
    "$@"
  fi
}
# Execute a command as another user. The subshell first cd's to a
# world-traversable directory: the caller's cwd may be unreadable to the
# target user (e.g. a private home dir), which would make sudo/runuser fail
# with "cannot chdir". sudo is preferred; root+runuser is the fallback.
# TODO: replace with a fully rootless podman build to eliminate the need
# for user-switching entirely.
run_as_user() {
  local target="$1"
  shift
  if command -v sudo >/dev/null 2>&1; then
    (
      cd /tmp 2>/dev/null || cd /
      sudo -u "$target" "$@"
    )
  elif is_root && command -v runuser >/dev/null 2>&1; then
    (
      cd /tmp 2>/dev/null || cd /
      runuser -u "$target" -- "$@"
    )
  else
    echo "Need sudo (or root+runuser) to run commands as $target." >&2
    exit 1
  fi
}
# Run a command as $OPENCLAW_USER with HOME forced to that user's home.
run_as_openclaw() {
  # Avoid root writes into $OPENCLAW_HOME (symlink/hardlink/TOCTOU footguns).
  # Anything under the target user's home should be created/modified as that user.
  # Forcing HOME makes tools run by that user (podman, sh) resolve their state
  # under $OPENCLAW_HOME rather than the invoker's home.
  run_as_user "$OPENCLAW_USER" env HOME="$OPENCLAW_HOME" "$@"
}
# Backslash-escape the characters that are special in the replacement half
# of a sed "s|…|…|g" expression: backslash, ampersand, and the | delimiter.
escape_sed_replacement_pipe_delim() {
  local raw="$1"
  printf '%s' "$raw" | sed 's/[\\&|]/\\&/g'
}
# Quadlet: opt-in via --quadlet or OPENCLAW_PODMAN_QUADLET=1.
# CLI flags are scanned in order (the last one seen wins), then the env var
# overrides whatever the flags chose.
INSTALL_QUADLET=false
for arg in "$@"; do
  case "$arg" in
    --quadlet) INSTALL_QUADLET=true ;;
    --container) INSTALL_QUADLET=false ;;
  esac
done
if [[ -n "${OPENCLAW_PODMAN_QUADLET:-}" ]]; then
  # ${VAR,,} lowercases the value, so "YES"/"True" etc. are accepted.
  case "${OPENCLAW_PODMAN_QUADLET,,}" in
    1|yes|true) INSTALL_QUADLET=true ;;
    0|no|false) INSTALL_QUADLET=false ;;
  esac
fi
# Fail fast on missing prerequisites before mutating anything on the host.
require_cmd podman
if ! is_root; then
  require_cmd sudo
fi
if [[ ! -f "$REPO_PATH/Dockerfile" ]]; then
  echo "Dockerfile not found at $REPO_PATH. Set OPENCLAW_REPO_PATH to the repo root." >&2
  exit 1
fi
if [[ ! -f "$RUN_SCRIPT_SRC" ]]; then
  echo "Launch script not found at $RUN_SCRIPT_SRC." >&2
  exit 1
fi
# Print 64 lowercase hex characters (32 random bytes) for the gateway token.
# Generators are tried in order of preference: openssl, python3, od.
# Aborts when none of them is available.
generate_token_hex_32() {
  if command -v openssl >/dev/null 2>&1; then
    openssl rand -hex 32
    return 0
  fi
  if command -v python3 >/dev/null 2>&1; then
    python3 -c 'import secrets; print(secrets.token_hex(32))'
    return 0
  fi
  if command -v od >/dev/null 2>&1; then
    # 32 random bytes -> 64 lowercase hex chars
    od -An -N32 -tx1 /dev/urandom | tr -d " \n"
    return 0
  fi
  echo "Missing dependency: need openssl or python3 (or od) to generate OPENCLAW_GATEWAY_TOKEN." >&2
  exit 1
}
# True when the given account exists in the passwd database. Uses getent
# when available; falls back to `id` (covers systems without getent).
user_exists() {
  local account="$1"
  if command -v getent >/dev/null 2>&1; then
    if getent passwd "$account" >/dev/null 2>&1; then
      return 0
    fi
  fi
  id -u "$account" >/dev/null 2>&1
}
# Print the home directory of a user: getent first, raw /etc/passwd next,
# and a /home/<user> guess when neither source knows the account.
resolve_user_home() {
  local account="$1"
  local home_dir=""
  if command -v getent >/dev/null 2>&1; then
    home_dir="$(getent passwd "$account" 2>/dev/null | cut -d: -f6 || true)"
  fi
  if [[ -z "$home_dir" && -f /etc/passwd ]]; then
    home_dir="$(awk -F: -v u="$account" '$1==u {print $6}' /etc/passwd 2>/dev/null || true)"
  fi
  printf '%s' "${home_dir:-/home/$account}"
}
# Print the first available non-login shell, preferring distro-standard
# nologin locations. Falls back to /usr/sbin/nologin even when absent so the
# useradd call below still records a non-interactive shell.
resolve_nologin_shell() {
  # Declare the loop variable local: without it, `cand` leaks into the
  # caller's (global) scope after this function returns.
  local cand
  for cand in /usr/sbin/nologin /sbin/nologin /usr/bin/nologin /bin/false; do
    if [[ -x "$cand" ]]; then
      printf '%s' "$cand"
      return 0
    fi
  done
  printf '%s' "/usr/sbin/nologin"
}
# --- Service account ------------------------------------------------------
# Create openclaw user (non-login, with home) if missing.
if ! user_exists "$OPENCLAW_USER"; then
  NOLOGIN_SHELL="$(resolve_nologin_shell)"
  echo "Creating user $OPENCLAW_USER ($NOLOGIN_SHELL, with home)..."
  if command -v useradd >/dev/null 2>&1; then
    run_root useradd -m -s "$NOLOGIN_SHELL" "$OPENCLAW_USER"
  elif command -v adduser >/dev/null 2>&1; then
    # Debian/Ubuntu: adduser supports --disabled-password/--gecos. Busybox adduser differs.
    run_root adduser --disabled-password --gecos "" --shell "$NOLOGIN_SHELL" "$OPENCLAW_USER"
  else
    echo "Neither useradd nor adduser found, cannot create user $OPENCLAW_USER." >&2
    exit 1
  fi
else
  echo "User $OPENCLAW_USER already exists."
fi
# Derived paths for everything installed below.
OPENCLAW_HOME="$(resolve_user_home "$OPENCLAW_USER")"
OPENCLAW_UID="$(id -u "$OPENCLAW_USER" 2>/dev/null || true)"
OPENCLAW_CONFIG="$OPENCLAW_HOME/.openclaw"
LAUNCH_SCRIPT_DST="$OPENCLAW_HOME/run-openclaw-podman.sh"
# Prefer systemd user services (Quadlet) for production. Enable lingering early so rootless Podman can run
# without an interactive login.
if command -v loginctl &>/dev/null; then
  # Best-effort: lingering may be unsupported (e.g. non-systemd hosts).
  run_root loginctl enable-linger "$OPENCLAW_USER" 2>/dev/null || true
fi
# Pre-create the per-user runtime dir (and its containers subdir) so rootless
# Podman has writable state before the user's first real login session.
if [[ -n "${OPENCLAW_UID:-}" && -d /run/user ]] && command -v systemctl &>/dev/null; then
  if [[ ! -d "/run/user/$OPENCLAW_UID" ]]; then
    run_root install -d -m 700 -o "$OPENCLAW_UID" -g "$OPENCLAW_UID" "/run/user/$OPENCLAW_UID" || true
  fi
  run_root mkdir -p "/run/user/$OPENCLAW_UID/containers" || true
  run_root chown "$OPENCLAW_UID:$OPENCLAW_UID" "/run/user/$OPENCLAW_UID/containers" || true
  run_root chmod 700 "/run/user/$OPENCLAW_UID/containers" || true
fi
# Create the OpenClaw home, config, and workspace directories (mode 700)
# owned by the target user. Despite the name, this runs `install` via
# run_root rather than as the user — ownership is set explicitly with -o/-g,
# so the result is owned by $OPENCLAW_UID either way.
mkdir_user_dirs_as_openclaw() {
  run_root install -d -m 700 -o "$OPENCLAW_UID" -g "$OPENCLAW_UID" "$OPENCLAW_HOME" "$OPENCLAW_CONFIG"
  run_root install -d -m 700 -o "$OPENCLAW_UID" -g "$OPENCLAW_UID" "$OPENCLAW_CONFIG/workspace"
}
# True when the given subuid/subgid map file exists and already carries a
# range entry for $OPENCLAW_USER.
ensure_subid_entry() {
  local map_file="$1"
  [[ -f "$map_file" ]] || return 1
  grep -q "^${OPENCLAW_USER}:" "$map_file" 2>/dev/null
}
# Warn (but do not abort) when subuid/subgid ranges look missing: rootless
# Podman needs them, and the correct fix is distro-specific.
if ! ensure_subid_entry /etc/subuid || ! ensure_subid_entry /etc/subgid; then
  echo "WARNING: ${OPENCLAW_USER} may not have subuid/subgid ranges configured." >&2
  echo "If rootless Podman fails, add 'openclaw:100000:65536' to both /etc/subuid and /etc/subgid." >&2
fi
mkdir_user_dirs_as_openclaw
# --- Image build + transfer ----------------------------------------------
# Build as the invoking user, export the image to a private temp dir, then
# load it into the openclaw user's Podman store (rootless stores are per-user).
IMAGE_TMP_BASE="$(resolve_image_tmp_dir)"
echo "Using temp base for image export: $IMAGE_TMP_BASE"
IMAGE_TAR_DIR="$(mktemp -d "${IMAGE_TMP_BASE%/}/openclaw-podman-image.XXXXXX")"
chmod 700 "$IMAGE_TAR_DIR"
IMAGE_TAR="$IMAGE_TAR_DIR/openclaw-image.tar"
# Always remove the tarball dir, even on early exit (trap below).
cleanup_image_tar() {
  rm -rf "$IMAGE_TAR_DIR"
}
trap cleanup_image_tar EXIT
BUILD_ARGS=()
if [[ -n "${OPENCLAW_DOCKER_APT_PACKAGES:-}" ]]; then
  BUILD_ARGS+=(--build-arg "OPENCLAW_DOCKER_APT_PACKAGES=${OPENCLAW_DOCKER_APT_PACKAGES}")
fi
if [[ -n "${OPENCLAW_EXTENSIONS:-}" ]]; then
  BUILD_ARGS+=(--build-arg "OPENCLAW_EXTENSIONS=${OPENCLAW_EXTENSIONS}")
fi
echo "Building image openclaw:local..."
podman build -t openclaw:local -f "$REPO_PATH/Dockerfile" "${BUILD_ARGS[@]}" "$REPO_PATH"
echo "Saving image to $IMAGE_TAR ..."
podman save -o "$IMAGE_TAR" openclaw:local
echo "Loading image into $OPENCLAW_USER Podman store..."
run_as_openclaw podman load -i "$IMAGE_TAR"
echo "Installing launch script to $LAUNCH_SCRIPT_DST ..."
run_root install -m 0755 -o "$OPENCLAW_UID" -g "$OPENCLAW_UID" "$RUN_SCRIPT_SRC" "$LAUNCH_SCRIPT_DST"
# First-run secrets/config, written as the openclaw user with umask 077.
# NOTE(review): the token is interpolated into the sh -lc command line, so it
# is briefly visible in `ps` output on multi-user hosts — confirm acceptable.
if [[ ! -f "$OPENCLAW_CONFIG/.env" ]]; then
  TOKEN="$(generate_token_hex_32)"
  run_as_openclaw sh -lc "umask 077 && printf '%s\n' 'OPENCLAW_GATEWAY_TOKEN=$TOKEN' > '$OPENCLAW_CONFIG/.env'"
  echo "Generated OPENCLAW_GATEWAY_TOKEN and wrote it to $OPENCLAW_CONFIG/.env"
fi
if [[ ! -f "$OPENCLAW_CONFIG/openclaw.json" ]]; then
  run_as_openclaw sh -lc "umask 077 && cat > '$OPENCLAW_CONFIG/openclaw.json' <<'JSON'
{ \"gateway\": { \"mode\": \"local\" } }
JSON"
  echo "Wrote minimal config to $OPENCLAW_CONFIG/openclaw.json"
fi
# --- Optional Quadlet (systemd user service) ------------------------------
if [[ "$INSTALL_QUADLET" == true ]]; then
  QUADLET_DIR="$OPENCLAW_HOME/.config/containers/systemd"
  QUADLET_DST="$QUADLET_DIR/openclaw.container"
  echo "Installing Quadlet to $QUADLET_DST ..."
  run_as_openclaw mkdir -p "$QUADLET_DIR"
  # Substitute the template's {{OPENCLAW_HOME}} placeholder via sed, escaping
  # the replacement so '\', '&' and the '|' delimiter stay literal.
  OPENCLAW_HOME_ESCAPED="$(escape_sed_replacement_pipe_delim "$OPENCLAW_HOME")"
  sed "s|{{OPENCLAW_HOME}}|$OPENCLAW_HOME_ESCAPED|g" "$QUADLET_TEMPLATE" | \
    run_as_openclaw sh -lc "cat > '$QUADLET_DST'"
  run_as_openclaw chmod 0644 "$QUADLET_DST"
  echo "Reloading and enabling user service..."
  # `systemctl --machine user@ --user` targets the openclaw user's systemd instance.
  run_root systemctl --machine "${OPENCLAW_USER}@" --user daemon-reload
  run_root systemctl --machine "${OPENCLAW_USER}@" --user enable --now openclaw.service
  echo "Quadlet installed and service started."
else
  echo "Container + launch script installed."
fi
echo
echo "Next:"
echo " ./scripts/run-openclaw-podman.sh launch"
echo " ./scripts/run-openclaw-podman.sh launch setup"

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Rootless OpenClaw in Podman: run after one-time setup.
#
# One-time setup (from repo root): ./setup-podman.sh
# One-time setup (from repo root): ./scripts/podman/setup.sh
# Then:
# ./scripts/run-openclaw-podman.sh launch # Start gateway
# ./scripts/run-openclaw-podman.sh launch setup # Onboarding wizard
@ -10,7 +10,7 @@
# sudo -u openclaw /home/openclaw/run-openclaw-podman.sh
# sudo -u openclaw /home/openclaw/run-openclaw-podman.sh setup
#
# Legacy: "setup-host" delegates to ../setup-podman.sh
# Legacy: "setup-host" delegates to the Podman setup script
set -euo pipefail
@ -35,15 +35,19 @@ OPENCLAW_HOME="$(resolve_user_home "$OPENCLAW_USER")"
OPENCLAW_UID="$(id -u "$OPENCLAW_USER" 2>/dev/null || true)"
LAUNCH_SCRIPT="$OPENCLAW_HOME/run-openclaw-podman.sh"
# Legacy: setup-host → run setup-podman.sh
# Legacy: setup-host → run the Podman setup script
if [[ "${1:-}" == "setup-host" ]]; then
shift
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
SETUP_PODMAN="$REPO_ROOT/scripts/podman/setup.sh"
if [[ -f "$SETUP_PODMAN" ]]; then
exec "$SETUP_PODMAN" "$@"
fi
SETUP_PODMAN="$REPO_ROOT/setup-podman.sh"
if [[ -f "$SETUP_PODMAN" ]]; then
exec "$SETUP_PODMAN" "$@"
fi
echo "setup-podman.sh not found at $SETUP_PODMAN. Run from repo root: ./setup-podman.sh" >&2
echo "Podman setup script not found. Run from repo root: ./scripts/podman/setup.sh" >&2
exit 1
fi
@ -228,4 +232,4 @@ podman run --pull="$PODMAN_PULL" -d --replace \
echo "Container $CONTAINER_NAME started. Dashboard: http://127.0.0.1:${HOST_GATEWAY_PORT}/"
echo "Logs: podman logs -f $CONTAINER_NAME"
echo "For auto-start/restarts, use: ./setup-podman.sh --quadlet (Quadlet + systemd user service)."
echo "For auto-start/restarts, use: ./scripts/podman/setup.sh --quadlet (Quadlet + systemd user service)."

View File

@ -209,7 +209,7 @@ docker ps
- Docker and Docker Compose installed
- Bash or Zsh shell
- OpenClaw project (from `docker-setup.sh`)
- OpenClaw project (run `scripts/docker/setup.sh`)
## Development

View File

@ -114,7 +114,7 @@ _clawdock_ensure_dir() {
echo "Clone it first:"
echo ""
echo " git clone https://github.com/openclaw/openclaw.git ~/openclaw"
echo " cd ~/openclaw && ./docker-setup.sh"
echo " cd ~/openclaw && ./scripts/docker/setup.sh"
echo ""
echo "Or set CLAWDOCK_DIR if it's elsewhere:"
echo ""

View File

@ -5,6 +5,41 @@ import path from "node:path";
import { fileURLToPath, pathToFileURL } from "node:url";
import { stageBundledPluginRuntime } from "./stage-bundled-plugin-runtime.mjs";
const warningFilterKey = Symbol.for("openclaw.warning-filter");
// Suppress Node's punycode deprecation warning (DEP0040) while forwarding
// every other warning to the original process.emitWarning. Installs at most
// once per process, guarded by a global symbol key.
function installProcessWarningFilter() {
  if (globalThis[warningFilterKey]?.installed) {
    return;
  }
  const passthrough = process.emitWarning.bind(process);
  process.emitWarning = (...args) => {
    const [first, second, third] = args;
    let name;
    let message;
    let code;
    if (first instanceof Error) {
      // Error form: emitWarning(error) — name/message/code come off the error.
      ({ name, message, code } = first);
    } else {
      // String form: emitWarning(message, type?, code?) or (message, options).
      message = typeof first === "string" ? first : undefined;
      name = typeof second === "string" ? second : second?.type;
      code = typeof third === "string" ? third : second?.code;
    }
    if (code === "DEP0040" && message?.includes("punycode")) {
      return;
    }
    return Reflect.apply(passthrough, process, args);
  };
  globalThis[warningFilterKey] = { installed: true };
}
installProcessWarningFilter();
const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "..");
const smokeEntryPath = path.join(repoRoot, "dist", "plugins", "build-smoke-entry.js");
assert.ok(fs.existsSync(smokeEntryPath), `missing build output: ${smokeEntryPath}`);

View File

@ -0,0 +1,122 @@
import { spawnSync } from "node:child_process";
// Raw control characters used to build the ANSI regex without embedding
// literal escape bytes in the source file.
const ESCAPE = String.fromCodePoint(27);
const BELL = String.fromCodePoint(7);
const ANSI_ESCAPE_PATTERN = new RegExp(
  // Strip CSI/OSC-style control sequences from Vitest output before parsing file lines.
  // Alternatives: OSC (ESC ] ... terminated by BEL or ESC \), CSI
  // (ESC [ params intermediates final), and single-character escapes.
  `${ESCAPE}(?:\\][^${BELL}]*(?:${BELL}|${ESCAPE}\\\\)|\\[[0-?]*[ -/]*[@-~]|[@-Z\\\\-_])`,
  "g",
);
// Matches a Vitest "completed test file" summary line, capturing the file
// path plus its duration value and unit (ms or s).
const COMPLETED_TEST_FILE_LINE_PATTERN =
  /(?<file>(?:src|extensions|test|ui)\/\S+?\.(?:live\.test|e2e\.test|test)\.ts)\s+\(.*\)\s+(?<duration>\d+(?:\.\d+)?)(?<unit>ms|s)\s*$/;
// Column spec for `ps -axo`; the trailing "=" on each column suppresses headers.
const PS_COLUMNS = ["pid=", "ppid=", "rss=", "comm="];
// Convert a captured duration string to whole milliseconds.
// `unit` is "ms" or "s"; returns null when the value is not a finite number.
function parseDurationMs(rawValue, unit) {
  const value = Number.parseFloat(rawValue);
  if (!Number.isFinite(value)) {
    return null;
  }
  const milliseconds = unit === "s" ? value * 1000 : value;
  return Math.round(milliseconds);
}
// Remove ANSI control sequences so the completed-file regex sees plain text.
function stripAnsi(text) {
  return text.replaceAll(ANSI_ESCAPE_PATTERN, "");
}
// Extract a { file, durationMs } record for every completed-test-file line
// in a chunk of Vitest output. Lines that do not match the summary pattern
// are skipped; durations that fail to parse yield durationMs === null.
export function parseCompletedTestFileLines(text) {
  const results = [];
  for (const line of stripAnsi(text).split(/\r?\n/u)) {
    const match = line.match(COMPLETED_TEST_FILE_LINE_PATTERN);
    if (!match?.groups) {
      continue;
    }
    results.push({
      file: match.groups.file,
      durationMs: parseDurationMs(match.groups.duration, match.groups.unit),
    });
  }
  return results;
}
// Snapshot the process tree rooted at rootPid using `ps`.
// Returns { pid, parentPid, rssKb, command } records for rootPid and all of
// its descendants (BFS over the ppid graph), or null when rootPid is
// invalid, the platform is Windows, `ps` fails, or rootPid is not listed.
export function getProcessTreeRecords(rootPid) {
  if (!Number.isInteger(rootPid) || rootPid <= 0 || process.platform === "win32") {
    return null;
  }
  const result = spawnSync("ps", ["-axo", PS_COLUMNS.join(",")], {
    encoding: "utf8",
  });
  if (result.status !== 0 || result.error) {
    return null;
  }
  // pid/ppid/rss are numeric columns; everything after them is the command
  // name, which can itself contain whitespace. A single anchored match keeps
  // the full comm value — the previous split(/\s+/u, 4) silently DROPPED the
  // remainder, because JavaScript's split limit truncates instead of
  // collecting the rest into the final element.
  const psLinePattern = /^(\d+)\s+(\d+)\s+(\d+)\s+(.*)$/u;
  const childPidsByParent = new Map();
  const recordsByPid = new Map();
  for (const line of result.stdout.split(/\r?\n/u)) {
    const trimmed = line.trim();
    if (!trimmed) {
      continue;
    }
    const match = trimmed.match(psLinePattern);
    if (!match) {
      continue;
    }
    const pid = Number.parseInt(match[1], 10);
    const parentPid = Number.parseInt(match[2], 10);
    const rssKb = Number.parseInt(match[3], 10);
    if (!Number.isInteger(pid) || !Number.isInteger(parentPid) || !Number.isInteger(rssKb)) {
      continue;
    }
    const siblings = childPidsByParent.get(parentPid) ?? [];
    siblings.push(pid);
    childPidsByParent.set(parentPid, siblings);
    recordsByPid.set(pid, {
      pid,
      parentPid,
      rssKb,
      command: match[4] ?? "",
    });
  }
  if (!recordsByPid.has(rootPid)) {
    return null;
  }
  // Breadth-first walk from the root; `visited` guards against any cycles in
  // a malformed ps listing.
  const queue = [rootPid];
  const visited = new Set();
  const records = [];
  while (queue.length > 0) {
    const pid = queue.shift();
    if (pid === undefined || visited.has(pid)) {
      continue;
    }
    visited.add(pid);
    const record = recordsByPid.get(pid);
    if (record) {
      records.push(record);
    }
    for (const childPid of childPidsByParent.get(pid) ?? []) {
      if (!visited.has(childPid)) {
        queue.push(childPid);
      }
    }
  }
  return records;
}
// Total the resident-set size of the process tree rooted at rootPid.
// Returns { rssKb, processCount }, or null when the tree cannot be sampled.
export function sampleProcessTreeRssKb(rootPid) {
  const records = getProcessTreeRecords(rootPid);
  if (records === null) {
    return null;
  }
  return records.reduce(
    (acc, record) => ({
      rssKb: acc.rssKb + record.rssKb,
      processCount: acc.processCount + 1,
    }),
    { rssKb: 0, processCount: 0 },
  );
}

View File

@ -4,6 +4,11 @@ import os from "node:os";
import path from "node:path";
import { channelTestPrefixes } from "../vitest.channel-paths.mjs";
import { isUnitConfigTestFile } from "../vitest.unit-paths.mjs";
import {
getProcessTreeRecords,
parseCompletedTestFileLines,
sampleProcessTreeRssKb,
} from "./test-parallel-memory.mjs";
import {
appendCapturedOutput,
hasFatalTestRunOutput,
@ -46,17 +51,6 @@ const hostMemoryGiB = Math.floor(os.totalmem() / 1024 ** 3);
const highMemLocalHost = !isCI && hostMemoryGiB >= 96;
const lowMemLocalHost = !isCI && hostMemoryGiB < 64;
const nodeMajor = Number.parseInt(process.versions.node.split(".")[0] ?? "", 10);
// vmForks is a big win for transform/import heavy suites. Node 24 is stable again
// for the default unit-fast lane after moving the known flaky files to fork-only
// isolation, but Node 25+ still falls back to process forks until re-validated.
// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor <= 24 : true;
const useVmForks =
process.env.OPENCLAW_TEST_VM_FORKS === "1" ||
(process.env.OPENCLAW_TEST_VM_FORKS !== "0" && !isWindows && supportsVmForks && !lowMemLocalHost);
const disableIsolation = process.env.OPENCLAW_TEST_NO_ISOLATE === "1";
const includeGatewaySuite = process.env.OPENCLAW_TEST_INCLUDE_GATEWAY === "1";
const includeExtensionsSuite = process.env.OPENCLAW_TEST_INCLUDE_EXTENSIONS === "1";
const rawTestProfile = process.env.OPENCLAW_TEST_PROFILE?.trim().toLowerCase();
const testProfile =
rawTestProfile === "low" ||
@ -67,6 +61,21 @@ const testProfile =
? rawTestProfile
: "normal";
const isMacMiniProfile = testProfile === "macmini";
// vmForks is a big win for transform/import heavy suites. Node 24 is stable again
// for the default unit-fast lane after moving the known flaky files to fork-only
// isolation, but Node 25+ still falls back to process forks until re-validated.
// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor <= 24 : true;
const useVmForks =
process.env.OPENCLAW_TEST_VM_FORKS === "1" ||
(process.env.OPENCLAW_TEST_VM_FORKS !== "0" &&
!isWindows &&
supportsVmForks &&
!lowMemLocalHost &&
(isCI || testProfile !== "low"));
const disableIsolation = process.env.OPENCLAW_TEST_NO_ISOLATE === "1";
const includeGatewaySuite = process.env.OPENCLAW_TEST_INCLUDE_GATEWAY === "1";
const includeExtensionsSuite = process.env.OPENCLAW_TEST_INCLUDE_EXTENSIONS === "1";
// Even on low-memory hosts, keep the isolated lane split so files like
// git-commit.test.ts still get the worker/process isolation they require.
const shouldSplitUnitRuns = testProfile !== "serial";
@ -184,6 +193,7 @@ const countExplicitEntryFilters = (entryArgs) => {
const { fileFilters } = parsePassthroughArgs(entryArgs.slice(2));
return fileFilters.length > 0 ? fileFilters.length : null;
};
const getExplicitEntryFilters = (entryArgs) => parsePassthroughArgs(entryArgs.slice(2)).fileFilters;
const passthroughRequiresSingleRun = passthroughOptionArgs.some((arg) => {
if (!arg.startsWith("-")) {
return false;
@ -576,6 +586,22 @@ const topLevelParallelEnabled =
testProfile !== "serial" &&
!(!isCI && nodeMajor >= 25) &&
!isMacMiniProfile;
const defaultTopLevelParallelLimit =
testProfile === "serial"
? 1
: testProfile === "low"
? 2
: testProfile === "max"
? 5
: highMemLocalHost
? 4
: lowMemLocalHost
? 2
: 3;
const topLevelParallelLimit = Math.max(
1,
parseEnvNumber("OPENCLAW_TEST_TOP_LEVEL_CONCURRENCY", defaultTopLevelParallelLimit),
);
const overrideWorkers = Number.parseInt(process.env.OPENCLAW_TEST_WORKERS ?? "", 10);
const resolvedOverride =
Number.isFinite(overrideWorkers) && overrideWorkers > 0 ? overrideWorkers : null;
@ -707,6 +733,39 @@ const maxOldSpaceSizeMb = (() => {
})();
// Render a duration: seconds with one decimal at >= 1s, whole ms otherwise.
const formatElapsedMs = (elapsedMs) => {
  if (elapsedMs >= 1000) {
    return `${(elapsedMs / 1000).toFixed(1)}s`;
  }
  return `${Math.round(elapsedMs)}ms`;
};
// Human-readable RSS from KiB: GiB with two decimals at >= 1 GiB, MiB with
// one decimal at >= 1 MiB, raw KiB below that.
const formatMemoryKb = (rssKb) => {
  if (rssKb >= 1024 ** 2) {
    return `${(rssKb / 1024 ** 2).toFixed(2)}GiB`;
  }
  if (rssKb >= 1024) {
    return `${(rssKb / 1024).toFixed(1)}MiB`;
  }
  return `${rssKb}KiB`;
};
// Signed variant for deltas; zero renders with a leading "+".
const formatMemoryDeltaKb = (rssKb) => {
  const sign = rssKb >= 0 ? "+" : "-";
  return `${sign}${formatMemoryKb(Math.abs(rssKb))}`;
};
const rawMemoryTrace = process.env.OPENCLAW_TEST_MEMORY_TRACE?.trim().toLowerCase();
const memoryTraceEnabled =
process.platform !== "win32" &&
(rawMemoryTrace === "1" ||
rawMemoryTrace === "true" ||
(rawMemoryTrace !== "0" && rawMemoryTrace !== "false" && isCI));
const memoryTracePollMs = Math.max(250, parseEnvNumber("OPENCLAW_TEST_MEMORY_TRACE_POLL_MS", 1000));
const memoryTraceTopCount = Math.max(1, parseEnvNumber("OPENCLAW_TEST_MEMORY_TRACE_TOP_COUNT", 6));
const heapSnapshotIntervalMs = Math.max(
0,
parseEnvNumber("OPENCLAW_TEST_HEAPSNAPSHOT_INTERVAL_MS", 0),
);
const heapSnapshotMinIntervalMs = 5000;
const heapSnapshotEnabled =
process.platform !== "win32" && heapSnapshotIntervalMs >= heapSnapshotMinIntervalMs;
const heapSnapshotSignal = process.env.OPENCLAW_TEST_HEAPSNAPSHOT_SIGNAL?.trim() || "SIGUSR2";
const heapSnapshotBaseDir = heapSnapshotEnabled
? path.resolve(
process.env.OPENCLAW_TEST_HEAPSNAPSHOT_DIR?.trim() ||
path.join(os.tmpdir(), `openclaw-heapsnapshots-${Date.now()}`),
)
: null;
const ensureNodeOptionFlag = (nodeOptions, flagPrefix, nextValue) =>
nodeOptions.includes(flagPrefix) ? nodeOptions : `${nodeOptions} ${nextValue}`.trim();
const isNodeLikeProcess = (command) => /(?:^|\/)node(?:$|\.exe$)/iu.test(command);
const runOnce = (entry, extraArgs = []) =>
new Promise((resolve) => {
@ -718,6 +777,7 @@ const runOnce = (entry, extraArgs = []) =>
entry.name === "extensions" && maxWorkers === 1 && entry.args.includes("--pool=vmForks")
? entry.args.map((arg) => (arg === "--pool=vmForks" ? "--pool=forks" : arg))
: entry.args;
const explicitEntryFilters = getExplicitEntryFilters(entryArgs);
const args = maxWorkers
? [
...entryArgs,
@ -738,23 +798,182 @@ const runOnce = (entry, extraArgs = []) =>
(acc, flag) => (acc.includes(flag) ? acc : `${acc} ${flag}`.trim()),
nodeOptions,
);
const heapFlag =
const heapSnapshotDir =
heapSnapshotBaseDir === null ? null : path.join(heapSnapshotBaseDir, entry.name);
let resolvedNodeOptions =
maxOldSpaceSizeMb && !nextNodeOptions.includes("--max-old-space-size=")
? `--max-old-space-size=${maxOldSpaceSizeMb}`
: null;
const resolvedNodeOptions = heapFlag
? `${nextNodeOptions} ${heapFlag}`.trim()
: nextNodeOptions;
? `${nextNodeOptions} --max-old-space-size=${maxOldSpaceSizeMb}`.trim()
: nextNodeOptions;
if (heapSnapshotEnabled && heapSnapshotDir) {
try {
fs.mkdirSync(heapSnapshotDir, { recursive: true });
} catch (err) {
console.error(
`[test-parallel] failed to create heap snapshot dir ${heapSnapshotDir}: ${String(err)}`,
);
resolve(1);
return;
}
resolvedNodeOptions = ensureNodeOptionFlag(
resolvedNodeOptions,
"--diagnostic-dir=",
`--diagnostic-dir=${heapSnapshotDir}`,
);
resolvedNodeOptions = ensureNodeOptionFlag(
resolvedNodeOptions,
"--heapsnapshot-signal=",
`--heapsnapshot-signal=${heapSnapshotSignal}`,
);
}
let output = "";
let fatalSeen = false;
let childError = null;
let child;
let pendingLine = "";
let memoryPollTimer = null;
let heapSnapshotTimer = null;
const memoryFileRecords = [];
let initialTreeSample = null;
let latestTreeSample = null;
let peakTreeSample = null;
let heapSnapshotSequence = 0;
const updatePeakTreeSample = (sample, reason) => {
if (!sample) {
return;
}
if (!peakTreeSample || sample.rssKb > peakTreeSample.rssKb) {
peakTreeSample = { ...sample, reason };
}
};
const triggerHeapSnapshot = (reason) => {
if (!heapSnapshotEnabled || !child?.pid || !heapSnapshotDir) {
return;
}
const records = getProcessTreeRecords(child.pid) ?? [];
const targetPids = records
.filter((record) => record.pid !== process.pid && isNodeLikeProcess(record.command))
.map((record) => record.pid);
if (targetPids.length === 0) {
return;
}
heapSnapshotSequence += 1;
let signaledCount = 0;
for (const pid of targetPids) {
try {
process.kill(pid, heapSnapshotSignal);
signaledCount += 1;
} catch {
// Process likely exited between ps sampling and signal delivery.
}
}
if (signaledCount > 0) {
console.log(
`[test-parallel][heap] ${entry.name} seq=${String(heapSnapshotSequence)} reason=${reason} signaled=${String(
signaledCount,
)}/${String(targetPids.length)} dir=${heapSnapshotDir}`,
);
}
};
const captureTreeSample = (reason) => {
if (!memoryTraceEnabled || !child?.pid) {
return null;
}
const sample = sampleProcessTreeRssKb(child.pid);
if (!sample) {
return null;
}
latestTreeSample = sample;
if (!initialTreeSample) {
initialTreeSample = sample;
}
updatePeakTreeSample(sample, reason);
return sample;
};
const logMemoryTraceForText = (text) => {
if (!memoryTraceEnabled) {
return;
}
const combined = `${pendingLine}${text}`;
const lines = combined.split(/\r?\n/u);
pendingLine = lines.pop() ?? "";
const completedFiles = parseCompletedTestFileLines(lines.join("\n"));
for (const completedFile of completedFiles) {
const sample = captureTreeSample(completedFile.file);
if (!sample) {
continue;
}
const previousRssKb =
memoryFileRecords.length > 0
? (memoryFileRecords.at(-1)?.rssKb ?? initialTreeSample?.rssKb ?? sample.rssKb)
: (initialTreeSample?.rssKb ?? sample.rssKb);
const deltaKb = sample.rssKb - previousRssKb;
const record = {
...completedFile,
rssKb: sample.rssKb,
processCount: sample.processCount,
deltaKb,
};
memoryFileRecords.push(record);
console.log(
`[test-parallel][mem] ${entry.name} file=${record.file} rss=${formatMemoryKb(
record.rssKb,
)} delta=${formatMemoryDeltaKb(record.deltaKb)} peak=${formatMemoryKb(
peakTreeSample?.rssKb ?? record.rssKb,
)} procs=${record.processCount}${record.durationMs ? ` duration=${formatElapsedMs(record.durationMs)}` : ""}`,
);
}
};
const logMemoryTraceSummary = () => {
if (!memoryTraceEnabled) {
return;
}
captureTreeSample("close");
const fallbackRecord =
memoryFileRecords.length === 0 &&
explicitEntryFilters.length === 1 &&
latestTreeSample &&
initialTreeSample
? [
{
file: explicitEntryFilters[0],
deltaKb: latestTreeSample.rssKb - initialTreeSample.rssKb,
},
]
: [];
const totalDeltaKb =
initialTreeSample && latestTreeSample
? latestTreeSample.rssKb - initialTreeSample.rssKb
: 0;
const topGrowthFiles = [...memoryFileRecords, ...fallbackRecord]
.filter((record) => record.deltaKb > 0 && typeof record.file === "string")
.toSorted((left, right) => right.deltaKb - left.deltaKb)
.slice(0, memoryTraceTopCount)
.map((record) => `${record.file}:${formatMemoryDeltaKb(record.deltaKb)}`);
console.log(
`[test-parallel][mem] summary ${entry.name} files=${memoryFileRecords.length} peak=${formatMemoryKb(
peakTreeSample?.rssKb ?? 0,
)} totalDelta=${formatMemoryDeltaKb(totalDeltaKb)} peakAt=${
peakTreeSample?.reason ?? "n/a"
} top=${topGrowthFiles.length > 0 ? topGrowthFiles.join(", ") : "none"}`,
);
};
try {
child = spawn(pnpm, args, {
stdio: ["inherit", "pipe", "pipe"],
env: { ...process.env, VITEST_GROUP: entry.name, NODE_OPTIONS: resolvedNodeOptions },
shell: isWindows,
});
captureTreeSample("spawn");
if (memoryTraceEnabled) {
memoryPollTimer = setInterval(() => {
captureTreeSample("poll");
}, memoryTracePollMs);
}
if (heapSnapshotEnabled) {
heapSnapshotTimer = setInterval(() => {
triggerHeapSnapshot("interval");
}, heapSnapshotIntervalMs);
}
} catch (err) {
console.error(`[test-parallel] spawn failed: ${String(err)}`);
resolve(1);
@ -765,12 +984,14 @@ const runOnce = (entry, extraArgs = []) =>
const text = chunk.toString();
fatalSeen ||= hasFatalTestRunOutput(`${output}${text}`);
output = appendCapturedOutput(output, text);
logMemoryTraceForText(text);
process.stdout.write(chunk);
});
child.stderr?.on("data", (chunk) => {
const text = chunk.toString();
fatalSeen ||= hasFatalTestRunOutput(`${output}${text}`);
output = appendCapturedOutput(output, text);
logMemoryTraceForText(text);
process.stderr.write(chunk);
});
child.on("error", (err) => {
@ -778,8 +999,15 @@ const runOnce = (entry, extraArgs = []) =>
console.error(`[test-parallel] child error: ${String(err)}`);
});
child.on("close", (code, signal) => {
if (memoryPollTimer) {
clearInterval(memoryPollTimer);
}
if (heapSnapshotTimer) {
clearInterval(heapSnapshotTimer);
}
children.delete(child);
const resolvedCode = resolveTestRunExitCode({ code, signal, output, fatalSeen, childError });
logMemoryTraceSummary();
console.log(
`[test-parallel] done ${entry.name} code=${String(resolvedCode)} elapsed=${formatElapsedMs(Date.now() - startedAt)}`,
);
@ -867,8 +1095,10 @@ const runEntriesWithLimit = async (entries, extraArgs = [], concurrency = 1) =>
const runEntries = async (entries, extraArgs = []) => {
if (topLevelParallelEnabled) {
const codes = await Promise.all(entries.map((entry) => run(entry, extraArgs)));
return codes.find((code) => code !== 0);
// Keep a bounded number of top-level Vitest processes in flight. As the
// singleton lane list grows, unbounded Promise.all scheduling turns
// isolation into cross-process contention and can reintroduce timeouts.
return runEntriesWithLimit(entries, extraArgs, topLevelParallelLimit);
}
return runEntriesWithLimit(entries, extraArgs);

View File

@ -1,312 +1,12 @@
#!/usr/bin/env bash
# One-time host setup for rootless OpenClaw in Podman: creates the openclaw
# user, builds the image, loads it into that user's Podman store, and installs
# the launch script. Run from repo root with sudo capability.
#
# Usage: ./setup-podman.sh [--quadlet|--container]
# --quadlet Install systemd Quadlet so the container runs as a user service
# --container Only install user + image + launch script; you start the container manually (default)
# Or set OPENCLAW_PODMAN_QUADLET=1 (or 0) to choose without a flag.
#
# After this, start the gateway manually:
# ./scripts/run-openclaw-podman.sh launch
# ./scripts/run-openclaw-podman.sh launch setup # onboarding wizard
# Or as the openclaw user: sudo -u openclaw /home/openclaw/run-openclaw-podman.sh
# If you used --quadlet, you can also: sudo systemctl --machine openclaw@ --user start openclaw.service
set -euo pipefail
OPENCLAW_USER="${OPENCLAW_PODMAN_USER:-openclaw}"
REPO_PATH="${OPENCLAW_REPO_PATH:-$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)}"
RUN_SCRIPT_SRC="$REPO_PATH/scripts/run-openclaw-podman.sh"
QUADLET_TEMPLATE="$REPO_PATH/scripts/podman/openclaw.container.in"
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SCRIPT_PATH="$ROOT_DIR/scripts/podman/setup.sh"
# Exit with an error message unless the given executable is found on PATH.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "Missing dependency: $1" >&2
  exit 1
}
# True when $1 names an existing, non-symlink directory that is both
# writable and traversable by the current user.
is_writable_dir() {
  local candidate="$1"
  [[ -n "$candidate" ]] || return 1
  [[ -d "$candidate" && ! -L "$candidate" ]] || return 1
  [[ -w "$candidate" && -x "$candidate" ]]
}
# True when $1 is a directory safe to stage the image tarball in:
# writable/traversable, not group/other-writable unless the sticky bit is
# set, and (when running as root) owned by root. stat failures are treated
# as "mode/owner unknown" and skip the corresponding check (best-effort).
is_safe_tmp_base() {
  local dir="$1"
  local mode=""
  local owner=""
  is_writable_dir "$dir" || return 1
  mode="$(stat -Lc '%a' "$dir" 2>/dev/null || true)"
  if [[ -n "$mode" ]]; then
    # Parse the octal mode; reject world/group-writable dirs that lack the
    # sticky bit (01000), since others could swap files out from under us.
    local perm=$((8#$mode))
    if (( (perm & 0022) != 0 && (perm & 01000) == 0 )); then
      return 1
    fi
  fi
  if is_root; then
    # As root, additionally require the directory to be root-owned so a
    # non-root owner cannot tamper with the staged image.
    owner="$(stat -Lc '%u' "$dir" 2>/dev/null || true)"
    if [[ -n "$owner" && "$owner" != "0" ]]; then
      return 1
    fi
  fi
  return 0
}
# Print the directory to stage the saved image tarball in, preferring:
# $TMPDIR (non-root only — presumably because a root process should not
# trust a user-supplied env path; TODO confirm), then /var/tmp, then /tmp.
# Falls back to /tmp unconditionally if none pass the safety check.
resolve_image_tmp_dir() {
  if ! is_root && is_safe_tmp_base "${TMPDIR:-}"; then
    printf '%s' "$TMPDIR"
    return 0
  fi
  if is_safe_tmp_base "/var/tmp"; then
    printf '%s' "/var/tmp"
    return 0
  fi
  if is_safe_tmp_base "/tmp"; then
    printf '%s' "/tmp"
    return 0
  fi
  printf '%s' "/tmp"
}
# True when running with effective UID 0.
is_root() {
  [[ "$(id -u)" -eq 0 ]]
}
# Run a command with root privileges: directly when already root,
# otherwise elevated through sudo. Propagates the command's exit status.
run_root() {
  if is_root; then
    "$@"
    return
  fi
  sudo "$@"
}
# Run a command as another user, via sudo when available, else runuser
# (root only). Usage: run_as_user <user> <command> [args...]
run_as_user() {
  # When switching users, the caller's cwd may be inaccessible to the target
  # user (e.g. a private home dir). Wrap in a subshell that cd's to a
  # world-traversable directory so sudo/runuser don't fail with "cannot chdir".
  # TODO: replace with fully rootless podman build to eliminate the need for
  # user-switching entirely.
  local user="$1"
  shift
  if command -v sudo >/dev/null 2>&1; then
    ( cd /tmp 2>/dev/null || cd /; sudo -u "$user" "$@" )
  elif is_root && command -v runuser >/dev/null 2>&1; then
    ( cd /tmp 2>/dev/null || cd /; runuser -u "$user" -- "$@" )
  else
    # No viable user-switching mechanism; abort the whole setup.
    echo "Need sudo (or root+runuser) to run commands as $user." >&2
    exit 1
  fi
}
# Run a command as $OPENCLAW_USER with HOME pointed at $OPENCLAW_HOME.
# Usage: run_as_openclaw <command> [args...]
run_as_openclaw() {
  # Avoid root writes into $OPENCLAW_HOME (symlink/hardlink/TOCTOU footguns).
  # Anything under the target user's home should be created/modified as that user.
  run_as_user "$OPENCLAW_USER" env HOME="$OPENCLAW_HOME" "$@"
}
# Escape $1 for use as the replacement text of a sed "s|...|...|g" command:
# backslash, ampersand, and the pipe delimiter are special there and get a
# leading backslash. Prints the escaped string without a trailing newline.
escape_sed_replacement_pipe_delim() {
  # Escape replacement metacharacters for sed "s|...|...|g" replacement text.
  printf '%s' "$1" | sed -e 's/[\\&|]/\\&/g'
}
# Quadlet: opt-in via --quadlet or OPENCLAW_PODMAN_QUADLET=1
INSTALL_QUADLET=false
for arg in "$@"; do
case "$arg" in
--quadlet) INSTALL_QUADLET=true ;;
--container) INSTALL_QUADLET=false ;;
esac
done
if [[ -n "${OPENCLAW_PODMAN_QUADLET:-}" ]]; then
case "${OPENCLAW_PODMAN_QUADLET,,}" in
1|yes|true) INSTALL_QUADLET=true ;;
0|no|false) INSTALL_QUADLET=false ;;
esac
fi
require_cmd podman
if ! is_root; then
require_cmd sudo
fi
if [[ ! -f "$REPO_PATH/Dockerfile" ]]; then
echo "Dockerfile not found at $REPO_PATH. Set OPENCLAW_REPO_PATH to the repo root." >&2
exit 1
fi
if [[ ! -f "$RUN_SCRIPT_SRC" ]]; then
echo "Launch script not found at $RUN_SCRIPT_SRC." >&2
if [[ ! -f "$SCRIPT_PATH" ]]; then
echo "Podman setup script not found at $SCRIPT_PATH" >&2
exit 1
fi
# Print 32 random bytes as 64 lowercase hex characters for the gateway
# token, trying openssl, then python3, then od+/dev/urandom; exits with an
# error if none of those generators is available.
generate_token_hex_32() {
  if command -v openssl >/dev/null 2>&1; then
    openssl rand -hex 32
    return 0
  fi
  if command -v python3 >/dev/null 2>&1; then
    python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
    return 0
  fi
  if command -v od >/dev/null 2>&1; then
    # 32 random bytes -> 64 lowercase hex chars
    od -An -N32 -tx1 /dev/urandom | tr -d " \n"
    return 0
  fi
  echo "Missing dependency: need openssl or python3 (or od) to generate OPENCLAW_GATEWAY_TOKEN." >&2
  exit 1
}
# True when the named account exists: checked via getent (covers NSS
# sources) when available, falling back to id.
user_exists() {
  local name="$1"
  if command -v getent >/dev/null 2>&1; then
    if getent passwd "$name" >/dev/null 2>&1; then
      return 0
    fi
  fi
  id -u "$name" >/dev/null 2>&1
}
# Print the home directory for user $1: getent first (covers NSS sources),
# then a direct /etc/passwd scan, then the conventional /home/<user>.
resolve_user_home() {
  local user="$1"
  local home=""
  if command -v getent >/dev/null 2>&1; then
    home="$(getent passwd "$user" 2>/dev/null | cut -d: -f6 || true)"
  fi
  if [[ -z "$home" && -f /etc/passwd ]]; then
    # Fallback for systems without getent: field 6 of the passwd entry.
    home="$(awk -F: -v u="$user" '$1==u {print $6}' /etc/passwd 2>/dev/null || true)"
  fi
  if [[ -z "$home" ]]; then
    home="/home/$user"
  fi
  printf '%s' "$home"
}
# Print the first executable no-login shell from the common candidates,
# defaulting to /usr/sbin/nologin when none is present.
resolve_nologin_shell() {
  local shell
  for shell in /usr/sbin/nologin /sbin/nologin /usr/bin/nologin /bin/false; do
    if [[ -x "$shell" ]]; then
      printf '%s' "$shell"
      return 0
    fi
  done
  printf '%s' "/usr/sbin/nologin"
}
# Create openclaw user (non-login, with home) if missing
if ! user_exists "$OPENCLAW_USER"; then
NOLOGIN_SHELL="$(resolve_nologin_shell)"
echo "Creating user $OPENCLAW_USER ($NOLOGIN_SHELL, with home)..."
if command -v useradd >/dev/null 2>&1; then
run_root useradd -m -s "$NOLOGIN_SHELL" "$OPENCLAW_USER"
elif command -v adduser >/dev/null 2>&1; then
# Debian/Ubuntu: adduser supports --disabled-password/--gecos. Busybox adduser differs.
run_root adduser --disabled-password --gecos "" --shell "$NOLOGIN_SHELL" "$OPENCLAW_USER"
else
echo "Neither useradd nor adduser found, cannot create user $OPENCLAW_USER." >&2
exit 1
fi
else
echo "User $OPENCLAW_USER already exists."
fi
OPENCLAW_HOME="$(resolve_user_home "$OPENCLAW_USER")"
OPENCLAW_UID="$(id -u "$OPENCLAW_USER" 2>/dev/null || true)"
OPENCLAW_CONFIG="$OPENCLAW_HOME/.openclaw"
LAUNCH_SCRIPT_DST="$OPENCLAW_HOME/run-openclaw-podman.sh"
# Prefer systemd user services (Quadlet) for production. Enable lingering early so rootless Podman can run
# without an interactive login.
if command -v loginctl &>/dev/null; then
run_root loginctl enable-linger "$OPENCLAW_USER" 2>/dev/null || true
fi
if [[ -n "${OPENCLAW_UID:-}" && -d /run/user ]] && command -v systemctl &>/dev/null; then
run_root systemctl start "user@${OPENCLAW_UID}.service" 2>/dev/null || true
fi
# Rootless Podman needs subuid/subgid for the run user
if ! grep -q "^${OPENCLAW_USER}:" /etc/subuid 2>/dev/null; then
echo "Warning: $OPENCLAW_USER has no subuid range. Rootless Podman may fail." >&2
echo " Add a line to /etc/subuid and /etc/subgid, e.g.: $OPENCLAW_USER:100000:65536" >&2
fi
echo "Creating $OPENCLAW_CONFIG and workspace..."
run_as_openclaw mkdir -p "$OPENCLAW_CONFIG/workspace"
run_as_openclaw chmod 700 "$OPENCLAW_CONFIG" "$OPENCLAW_CONFIG/workspace" 2>/dev/null || true
ENV_FILE="$OPENCLAW_CONFIG/.env"
if run_as_openclaw test -f "$ENV_FILE"; then
if ! run_as_openclaw grep -q '^OPENCLAW_GATEWAY_TOKEN=' "$ENV_FILE" 2>/dev/null; then
TOKEN="$(generate_token_hex_32)"
printf 'OPENCLAW_GATEWAY_TOKEN=%s\n' "$TOKEN" | run_as_openclaw tee -a "$ENV_FILE" >/dev/null
echo "Added OPENCLAW_GATEWAY_TOKEN to $ENV_FILE."
fi
run_as_openclaw chmod 600 "$ENV_FILE" 2>/dev/null || true
else
TOKEN="$(generate_token_hex_32)"
printf 'OPENCLAW_GATEWAY_TOKEN=%s\n' "$TOKEN" | run_as_openclaw tee "$ENV_FILE" >/dev/null
run_as_openclaw chmod 600 "$ENV_FILE" 2>/dev/null || true
echo "Created $ENV_FILE with new token."
fi
# The gateway refuses to start unless gateway.mode=local is set in config.
# Make first-run non-interactive; users can run the wizard later to configure channels/providers.
OPENCLAW_JSON="$OPENCLAW_CONFIG/openclaw.json"
if ! run_as_openclaw test -f "$OPENCLAW_JSON"; then
printf '%s\n' '{ gateway: { mode: "local" } }' | run_as_openclaw tee "$OPENCLAW_JSON" >/dev/null
run_as_openclaw chmod 600 "$OPENCLAW_JSON" 2>/dev/null || true
echo "Created $OPENCLAW_JSON (minimal gateway.mode=local)."
fi
echo "Building image from $REPO_PATH..."
BUILD_ARGS=()
[[ -n "${OPENCLAW_DOCKER_APT_PACKAGES:-}" ]] && BUILD_ARGS+=(--build-arg "OPENCLAW_DOCKER_APT_PACKAGES=${OPENCLAW_DOCKER_APT_PACKAGES}")
[[ -n "${OPENCLAW_EXTENSIONS:-}" ]] && BUILD_ARGS+=(--build-arg "OPENCLAW_EXTENSIONS=${OPENCLAW_EXTENSIONS}")
podman build ${BUILD_ARGS[@]+"${BUILD_ARGS[@]}"} -t openclaw:local -f "$REPO_PATH/Dockerfile" "$REPO_PATH"
echo "Loading image into $OPENCLAW_USER's Podman store..."
TMP_IMAGE_DIR="$(resolve_image_tmp_dir)"
echo "Using temporary image dir: $TMP_IMAGE_DIR"
TMP_STAGE_DIR="$(mktemp -d -p "$TMP_IMAGE_DIR" openclaw-image.XXXXXX)"
TMP_IMAGE="$TMP_STAGE_DIR/image.tar"
chmod 700 "$TMP_STAGE_DIR"
trap 'rm -rf "$TMP_STAGE_DIR"' EXIT
podman save openclaw:local -o "$TMP_IMAGE"
chmod 600 "$TMP_IMAGE"
# Stream the image into the target user's podman load so private temp directories
# do not need to be traversable by $OPENCLAW_USER.
cat "$TMP_IMAGE" | run_as_user "$OPENCLAW_USER" env HOME="$OPENCLAW_HOME" podman load
rm -rf "$TMP_STAGE_DIR"
trap - EXIT
echo "Copying launch script to $LAUNCH_SCRIPT_DST..."
run_root cat "$RUN_SCRIPT_SRC" | run_as_openclaw tee "$LAUNCH_SCRIPT_DST" >/dev/null
run_as_openclaw chmod 755 "$LAUNCH_SCRIPT_DST"
# Optionally install systemd quadlet for openclaw user (rootless Podman + systemd)
QUADLET_DIR="$OPENCLAW_HOME/.config/containers/systemd"
if [[ "$INSTALL_QUADLET" == true && -f "$QUADLET_TEMPLATE" ]]; then
echo "Installing systemd quadlet for $OPENCLAW_USER..."
run_as_openclaw mkdir -p "$QUADLET_DIR"
OPENCLAW_HOME_SED="$(escape_sed_replacement_pipe_delim "$OPENCLAW_HOME")"
sed "s|{{OPENCLAW_HOME}}|$OPENCLAW_HOME_SED|g" "$QUADLET_TEMPLATE" | run_as_openclaw tee "$QUADLET_DIR/openclaw.container" >/dev/null
run_as_openclaw chmod 700 "$OPENCLAW_HOME/.config" "$OPENCLAW_HOME/.config/containers" "$QUADLET_DIR" 2>/dev/null || true
run_as_openclaw chmod 600 "$QUADLET_DIR/openclaw.container" 2>/dev/null || true
if command -v systemctl &>/dev/null; then
run_root systemctl --machine "${OPENCLAW_USER}@" --user daemon-reload 2>/dev/null || true
run_root systemctl --machine "${OPENCLAW_USER}@" --user enable openclaw.service 2>/dev/null || true
run_root systemctl --machine "${OPENCLAW_USER}@" --user start openclaw.service 2>/dev/null || true
fi
fi
echo ""
echo "Setup complete. Start the gateway:"
echo " $RUN_SCRIPT_SRC launch"
echo " $RUN_SCRIPT_SRC launch setup # onboarding wizard"
echo "Or as $OPENCLAW_USER (e.g. from cron):"
echo " sudo -u $OPENCLAW_USER $LAUNCH_SCRIPT_DST"
echo " sudo -u $OPENCLAW_USER $LAUNCH_SCRIPT_DST setup"
if [[ "$INSTALL_QUADLET" == true ]]; then
echo "Or use systemd (quadlet):"
echo " sudo systemctl --machine ${OPENCLAW_USER}@ --user start openclaw.service"
echo " sudo systemctl --machine ${OPENCLAW_USER}@ --user status openclaw.service"
else
echo "To install systemd quadlet later: $0 --quadlet"
fi
exec "$SCRIPT_PATH" "$@"

View File

@ -1,9 +1,6 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import * as acpSessionManager from "../acp/control-plane/manager.js";
import type {
AcpCloseSessionInput,
AcpInitializeSessionInput,
} from "../acp/control-plane/manager.types.js";
import type { AcpInitializeSessionInput } from "../acp/control-plane/manager.types.js";
import {
clearRuntimeConfigSnapshot,
setRuntimeConfigSnapshot,
@ -16,6 +13,7 @@ import * as heartbeatWake from "../infra/heartbeat-wake.js";
import {
__testing as sessionBindingServiceTesting,
registerSessionBindingAdapter,
type SessionBindingAdapterCapabilities,
type SessionBindingPlacement,
type SessionBindingRecord,
} from "../infra/outbound/session-binding-service.js";
@ -104,9 +102,8 @@ function replaceSpawnConfig(next: OpenClawConfig): void {
setRuntimeConfigSnapshot(hoisted.state.cfg);
}
function createSessionBindingCapabilities() {
function createSessionBindingCapabilities(): SessionBindingAdapterCapabilities {
return {
adapterAvailable: true,
bindSupported: true,
unbindSupported: true,
placements: ["current", "child"] satisfies SessionBindingPlacement[],
@ -184,9 +181,16 @@ describe("spawnAcpDirect", () => {
metaCleared: false,
});
getAcpSessionManagerSpy.mockReset().mockReturnValue({
initializeSession: async (params: AcpInitializeSessionInput) =>
await hoisted.initializeSessionMock(params),
closeSession: async (params: AcpCloseSessionInput) => await hoisted.closeSessionMock(params),
initializeSession: async (
params: Parameters<
ReturnType<typeof acpSessionManager.getAcpSessionManager>["initializeSession"]
>[0],
) => await hoisted.initializeSessionMock(params),
closeSession: async (
params: Parameters<
ReturnType<typeof acpSessionManager.getAcpSessionManager>["closeSession"]
>[0],
) => await hoisted.closeSessionMock(params),
} as unknown as ReturnType<typeof acpSessionManager.getAcpSessionManager>);
hoisted.initializeSessionMock.mockReset().mockImplementation(async (argsUnknown: unknown) => {
const args = argsUnknown as AcpInitializeSessionInput;

View File

@ -6,25 +6,27 @@ function mockContextDeps(params: {
loadConfig: () => unknown;
discoveredModels?: DiscoveredModel[];
}) {
const ensureOpenClawModelsJson = vi.fn(async () => {});
vi.doMock("../config/config.js", () => ({
loadConfig: params.loadConfig,
}));
vi.doMock("./models-config.js", () => ({
ensureOpenClawModelsJson: vi.fn(async () => {}),
ensureOpenClawModelsJson,
}));
vi.doMock("./agent-paths.js", () => ({
resolveOpenClawAgentDir: () => "/tmp/openclaw-agent",
}));
vi.doMock("./pi-model-discovery.js", () => ({
vi.doMock("./pi-model-discovery-runtime.js", () => ({
discoverAuthStorage: vi.fn(() => ({})),
discoverModels: vi.fn(() => ({
getAll: () => params.discoveredModels ?? [],
})),
}));
return { ensureOpenClawModelsJson };
}
function mockContextModuleDeps(loadConfigImpl: () => unknown) {
mockContextDeps({ loadConfig: loadConfigImpl });
return mockContextDeps({ loadConfig: loadConfigImpl });
}
// Shared mock setup used by multiple tests.
@ -80,14 +82,37 @@ describe("lookupContextTokens", () => {
expect(lookupContextTokens("openrouter/claude-sonnet")).toBe(321_000);
});
it("only warms eagerly for startup commands that need model metadata", async () => {
it("can skip async warmup for read-only callers", async () => {
const { ensureOpenClawModelsJson } = mockContextModuleDeps(() => ({
models: {
providers: {
openrouter: {
models: [{ id: "openrouter/claude-sonnet", contextWindow: 321_000 }],
},
},
},
}));
const { lookupContextTokens } = await import("./context.js");
expect(
lookupContextTokens("openrouter/claude-sonnet", { allowAsyncLoad: false }),
).toBeUndefined();
await flushAsyncWarmup();
expect(ensureOpenClawModelsJson).not.toHaveBeenCalled();
});
it("only warms eagerly for real openclaw startup commands that need model metadata", async () => {
const argvSnapshot = process.argv;
try {
for (const scenario of [
{
argv: ["node", "openclaw", "--profile", "--", "config", "validate"],
argv: ["node", "openclaw", "chat"],
expectedCalls: 1,
},
{
argv: ["node", "openclaw", "--profile", "--", "config", "validate"],
expectedCalls: 0,
},
{
argv: ["node", "openclaw", "logs", "--limit", "5"],
expectedCalls: 0,
@ -97,16 +122,18 @@ describe("lookupContextTokens", () => {
expectedCalls: 0,
},
{
argv: ["node", "openclaw", "gateway", "status", "--json"],
argv: ["node", "scripts/test-built-plugin-singleton.mjs"],
expectedCalls: 0,
},
]) {
vi.resetModules();
const loadConfigMock = vi.fn(() => ({ models: {} }));
mockContextModuleDeps(loadConfigMock);
const { ensureOpenClawModelsJson } = mockContextModuleDeps(loadConfigMock);
process.argv = scenario.argv;
await import("./context.js");
await flushAsyncWarmup();
expect(loadConfigMock).toHaveBeenCalledTimes(scenario.expectedCalls);
expect(ensureOpenClawModelsJson).toHaveBeenCalledTimes(scenario.expectedCalls);
}
} finally {
process.argv = argvSnapshot;
@ -132,8 +159,6 @@ describe("lookupContextTokens", () => {
mockContextModuleDeps(loadConfigMock);
const argvSnapshot = process.argv;
process.argv = ["node", "openclaw", "config", "validate"];
try {
const { lookupContextTokens } = await import("./context.js");
expect(lookupContextTokens("openrouter/claude-sonnet")).toBeUndefined();
@ -144,7 +169,6 @@ describe("lookupContextTokens", () => {
expect(lookupContextTokens("openrouter/claude-sonnet")).toBe(654_321);
expect(loadConfigMock).toHaveBeenCalledTimes(2);
} finally {
process.argv = argvSnapshot;
vi.useRealTimers();
}
});
@ -156,7 +180,7 @@ describe("lookupContextTokens", () => {
]);
const { lookupContextTokens } = await import("./context.js");
// Trigger async cache population.
lookupContextTokens("gemini-3.1-pro-preview");
await flushAsyncWarmup();
// Conservative minimum: bare-id cache feeds runtime flush/compaction paths.
expect(lookupContextTokens("gemini-3.1-pro-preview")).toBe(128_000);
@ -171,7 +195,8 @@ describe("lookupContextTokens", () => {
{ id: "google-gemini-cli/gemini-3.1-pro-preview", contextWindow: 1_048_576 },
]);
const { resolveContextTokensForModel } = await import("./context.js");
const { lookupContextTokens, resolveContextTokensForModel } = await import("./context.js");
lookupContextTokens("google-gemini-cli/gemini-3.1-pro-preview");
await flushAsyncWarmup();
// With provider specified and no config override, bare lookup finds the
@ -224,7 +249,9 @@ describe("lookupContextTokens", () => {
mockDiscoveryDeps([{ id: "google/gemini-2.5-pro", contextWindow: 999_000 }]);
const cfg = createContextOverrideConfig("google", "gemini-2.5-pro", 2_000_000);
const resolveContextTokensForModel = await importResolveContextTokensForModel();
const { lookupContextTokens, resolveContextTokensForModel } = await import("./context.js");
lookupContextTokens("google/gemini-2.5-pro");
await flushAsyncWarmup();
// Google with explicit cfg: config direct scan wins before any cache lookup.
const googleResult = resolveContextTokensForModel({
@ -286,7 +313,9 @@ describe("lookupContextTokens", () => {
mockDiscoveryDeps([{ id: "google/gemini-2.5-pro", contextWindow: 999_000 }]);
const cfg = createContextOverrideConfig("google", "gemini-2.5-pro", 2_000_000);
const resolveContextTokensForModel = await importResolveContextTokensForModel();
const { lookupContextTokens, resolveContextTokensForModel } = await import("./context.js");
lookupContextTokens("google/gemini-2.5-pro");
await flushAsyncWarmup();
// model-only call (no explicit provider) must NOT apply config direct scan.
// Falls through to bare cache lookup: "google/gemini-2.5-pro" → 999k ✓.
@ -317,8 +346,9 @@ describe("lookupContextTokens", () => {
{ id: "google-gemini-cli/gemini-3.1-pro-preview", contextWindow: 1_048_576 },
]);
const { resolveContextTokensForModel } = await import("./context.js");
await new Promise((r) => setTimeout(r, 0));
const { lookupContextTokens, resolveContextTokensForModel } = await import("./context.js");
lookupContextTokens("google-gemini-cli/gemini-3.1-pro-preview");
await flushAsyncWarmup();
// Qualified "google-gemini-cli/gemini-3.1-pro-preview" → 1M wins over
// bare "gemini-3.1-pro-preview" → 128k (cross-provider minimum).

View File

@ -1,6 +1,7 @@
// Lazy-load pi-coding-agent model metadata so we can infer context windows when
// the agent reports a model id. This includes custom models.json entries.
import path from "node:path";
import { loadConfig } from "../config/config.js";
import type { OpenClawConfig } from "../config/config.js";
import { computeBackoff, type BackoffPolicy } from "../infra/backoff.js";
@ -84,6 +85,19 @@ let configuredConfig: OpenClawConfig | undefined;
let configLoadFailures = 0;
let nextConfigLoadAttemptAtMs = 0;
/**
 * Whether argv looks like a genuine OpenClaw CLI invocation, judged by the
 * basename of the script entry (argv[1]) matching a known CLI entrypoint.
 * Used to gate import-time warmup so generic Node scripts that merely pull
 * in shared chunks containing this module do not trigger it.
 */
function isLikelyOpenClawCliProcess(argv: string[] = process.argv): boolean {
  // Comparison is case-insensitive and whitespace-tolerant on the basename.
  const entry = path
    .basename(argv[1] ?? "")
    .trim()
    .toLowerCase();
  const knownEntrypoints = new Set(["openclaw", "openclaw.mjs", "entry.js", "entry.mjs"]);
  return knownEntrypoints.has(entry);
}
function getCommandPathFromArgv(argv: string[]): string[] {
const args = argv.slice(2);
const tokens: string[] = [];
@ -125,9 +139,19 @@ const SKIP_EAGER_WARMUP_PRIMARY_COMMANDS = new Set([
"webhooks",
]);
function shouldSkipEagerContextWindowWarmup(argv: string[] = process.argv): boolean {
function shouldEagerWarmContextWindowCache(argv: string[] = process.argv): boolean {
// Keep this gate tied to the real OpenClaw CLI entrypoints.
//
// This module can also land inside shared dist chunks that are imported from
// plugin-sdk/library surfaces during smoke tests and plugin loading. If we do
// eager warmup for those generic Node script imports, merely importing the
// built plugin-sdk can call ensureOpenClawModelsJson(), which cascades into
// plugin discovery and breaks dist/source singleton assumptions.
if (!isLikelyOpenClawCliProcess(argv)) {
return false;
}
const [primary] = getCommandPathFromArgv(argv);
return primary ? SKIP_EAGER_WARMUP_PRIMARY_COMMANDS.has(primary) : false;
return Boolean(primary) && !SKIP_EAGER_WARMUP_PRIMARY_COMMANDS.has(primary);
}
function primeConfiguredContextWindows(): OpenClawConfig | undefined {
@ -201,18 +225,23 @@ function ensureContextWindowCacheLoaded(): Promise<void> {
return loadPromise;
}
export function lookupContextTokens(modelId?: string): number | undefined {
export function lookupContextTokens(
modelId?: string,
options?: { allowAsyncLoad?: boolean },
): number | undefined {
if (!modelId) {
return undefined;
}
// Best-effort: kick off loading, but don't block.
void ensureContextWindowCacheLoaded();
// Best-effort: kick off loading on demand, but don't block lookups.
if (options?.allowAsyncLoad !== false) {
void ensureContextWindowCacheLoaded();
}
return MODEL_CACHE.get(modelId);
}
if (!shouldSkipEagerContextWindowWarmup()) {
// Keep prior behavior where model limits begin loading during startup.
// This avoids a cold-start miss on the first context token lookup.
if (shouldEagerWarmContextWindowCache()) {
// Keep startup warmth for the real CLI, but avoid import-time side effects
// when this module is pulled in through library/plugin-sdk surfaces.
void ensureContextWindowCacheLoaded();
}
@ -330,6 +359,7 @@ export function resolveContextTokensForModel(params: {
model?: string;
contextTokensOverride?: number;
fallbackContextTokens?: number;
allowAsyncLoad?: boolean;
}): number | undefined {
if (typeof params.contextTokensOverride === "number" && params.contextTokensOverride > 0) {
return params.contextTokensOverride;
@ -378,6 +408,7 @@ export function resolveContextTokensForModel(params: {
if (params.provider && ref && !ref.model.includes("/")) {
const qualifiedResult = lookupContextTokens(
`${normalizeProviderId(ref.provider)}/${ref.model}`,
{ allowAsyncLoad: params.allowAsyncLoad },
);
if (qualifiedResult !== undefined) {
return qualifiedResult;
@ -386,7 +417,9 @@ export function resolveContextTokensForModel(params: {
// Bare key fallback. For model-only calls with slash-containing IDs
// (e.g. "google/gemini-2.5-pro") this IS the raw discovery cache key.
const bareResult = lookupContextTokens(params.model);
const bareResult = lookupContextTokens(params.model, {
allowAsyncLoad: params.allowAsyncLoad,
});
if (bareResult !== undefined) {
return bareResult;
}
@ -397,6 +430,7 @@ export function resolveContextTokensForModel(params: {
if (!params.provider && ref && !ref.model.includes("/")) {
const qualifiedResult = lookupContextTokens(
`${normalizeProviderId(ref.provider)}/${ref.model}`,
{ allowAsyncLoad: params.allowAsyncLoad },
);
if (qualifiedResult !== undefined) {
return qualifiedResult;

View File

@ -49,6 +49,8 @@ type MockSubagentRun = {
error?: string;
};
};
type SessionEntryFixture = Omit<SessionEntry, "updatedAt"> & { updatedAt?: number };
type SessionStoreFixture = Record<string, SessionEntryFixture | undefined>;
const agentSpy = vi.fn(async (_req: AgentCallRequest) => ({ runId: "run-main", status: "ok" }));
const sendSpy = vi.fn(async (_req: AgentCallRequest) => ({ runId: "send-main", status: "ok" }));
@ -119,8 +121,7 @@ const hookRunnerMock = {
const chatHistoryMock = vi.fn(async (_sessionKey?: string) => ({
messages: [] as Array<unknown>,
}));
type TestSessionStore = Record<string, Partial<SessionEntry>>;
let sessionStore: TestSessionStore = {};
let sessionStore: SessionStoreFixture = {};
let configOverride: OpenClawConfig = {
session: {
mainKey: "main",
@ -163,21 +164,21 @@ function toSessionEntry(
}
function loadSessionStoreFixture(): Record<string, SessionEntry> {
return new Proxy({} as Record<string, SessionEntry>, {
get(_target, key: string | symbol) {
return new Proxy(sessionStore, {
get(target, key: string | symbol) {
if (typeof key !== "string") {
return undefined;
}
if (!(key in sessionStore) && key.includes(":subagent:")) {
if (!(key in target) && key.includes(":subagent:")) {
return toSessionEntry(key, {
inputTokens: 1,
outputTokens: 1,
totalTokens: 2,
});
}
return toSessionEntry(key, sessionStore[key]);
return toSessionEntry(key, target[key]);
},
});
}) as unknown as Record<string, SessionEntry>;
}
vi.mock("./subagent-registry.js", () => subagentRegistryMock);
@ -2387,7 +2388,7 @@ describe("subagent announce formatting", () => {
requesterOrigin: { channel: "whatsapp", to: "+1555", accountId: "acct-main" },
});
sessionStore = {
"agent:main:subagent:orchestrator": undefined as unknown as Record<string, unknown>,
"agent:main:subagent:orchestrator": undefined,
};
const didAnnounce = await runSubagentAnnounceFlow({
@ -2411,7 +2412,7 @@ describe("subagent announce formatting", () => {
subagentRegistryMock.isSubagentSessionRunActive.mockReturnValue(false);
subagentRegistryMock.resolveRequesterForChildSession.mockReturnValue(null);
sessionStore = {
"agent:main:subagent:orchestrator": undefined as unknown as Record<string, unknown>,
"agent:main:subagent:orchestrator": undefined,
};
const didAnnounce = await runSubagentAnnounceFlow({
@ -2574,7 +2575,7 @@ describe("subagent announce formatting", () => {
embeddedRunMock.isEmbeddedPiRunActive.mockReturnValue(false);
embeddedRunMock.isEmbeddedPiRunStreaming.mockReturnValue(false);
subagentRegistryMock.isSubagentSessionRunActive.mockReturnValue(false);
sessionStore = testCase.sessionStoreFixture as Record<string, Record<string, unknown>>;
sessionStore = testCase.sessionStoreFixture as SessionStoreFixture;
subagentRegistryMock.resolveRequesterForChildSession.mockReturnValue({
requesterSessionKey: "agent:main:main",
requesterOrigin: { channel: "discord", accountId: "jaris-account" },

View File

@ -30,6 +30,7 @@ function resolveProviderFromContext(ctx: MsgContext, cfg: OpenClawConfig): Chann
}
const direct =
normalizeAnyChannelId(explicitMessageChannel ?? undefined) ??
(explicitMessageChannel as ChannelId | undefined) ??
normalizeAnyChannelId(ctx.Provider) ??
normalizeAnyChannelId(ctx.Surface) ??
normalizeAnyChannelId(ctx.OriginatingChannel);
@ -46,6 +47,7 @@ function resolveProviderFromContext(ctx: MsgContext, cfg: OpenClawConfig): Chann
}
const normalized =
normalizeAnyChannelId(normalizedCandidateChannel ?? undefined) ??
(normalizedCandidateChannel as ChannelId | undefined) ??
normalizeAnyChannelId(candidate);
if (normalized) {
return normalized;
@ -254,6 +256,50 @@ function resolveSenderCandidates(params: {
return normalized;
}
/**
 * Fallback allowFrom resolution for channels whose plugin does not provide a
 * resolveAllowFrom hook. Precedence: account-level allowFrom, account-level
 * dm.allowFrom, channel-level allowFrom, channel-level dm.allowFrom.
 * Returns [] when no provider id is given or nothing usable is configured.
 */
function resolveFallbackAllowFrom(params: {
  cfg: OpenClawConfig;
  providerId?: ChannelId;
  accountId?: string | null;
}): Array<string | number> {
  const providerId = params.providerId?.trim();
  if (!providerId) {
    return [];
  }
  // Config shape is only partially typed here; describe just the slices we read.
  type AllowFromConfig = {
    allowFrom?: Array<string | number>;
    dm?: { allowFrom?: Array<string | number> };
  };
  type ChannelConfig = AllowFromConfig & {
    accounts?: Record<string, AllowFromConfig>;
  };
  const channels = params.cfg.channels as Record<string, ChannelConfig | undefined> | undefined;
  const channelCfg = channels?.[providerId];
  const accountCfg = params.accountId ? channelCfg?.accounts?.[params.accountId] : undefined;
  const resolved =
    accountCfg?.allowFrom ??
    accountCfg?.dm?.allowFrom ??
    channelCfg?.allowFrom ??
    channelCfg?.dm?.allowFrom;
  // Guard against malformed config values that are not arrays.
  return Array.isArray(resolved) ? resolved : [];
}
/**
 * Fallback per-channel command options used when a channel plugin does not
 * declare its own. Only WhatsApp enforces owner identity for commands.
 */
function resolveFallbackCommandOptions(providerId?: ChannelId): {
  enforceOwnerForCommands: boolean;
} {
  const enforceOwnerForCommands = providerId === "whatsapp";
  return { enforceOwnerForCommands };
}
export function resolveCommandAuthorization(params: {
ctx: MsgContext;
cfg: OpenClawConfig;
@ -275,7 +321,11 @@ export function resolveCommandAuthorization(params: {
const allowFromRaw = plugin?.config?.resolveAllowFrom
? plugin.config.resolveAllowFrom({ cfg, accountId: ctx.AccountId })
: [];
: resolveFallbackAllowFrom({
cfg,
providerId,
accountId: ctx.AccountId,
});
const allowFromList = formatAllowFromList({
plugin,
cfg,
@ -344,7 +394,10 @@ export function resolveCommandAuthorization(params: {
: undefined;
const senderId = matchedSender ?? senderCandidates[0];
const enforceOwner = Boolean(plugin?.commands?.enforceOwnerForCommands);
const enforceOwner = Boolean(
plugin?.commands?.enforceOwnerForCommands ??
resolveFallbackCommandOptions(providerId).enforceOwnerForCommands,
);
const senderIsOwnerByIdentity = Boolean(matchedSender);
const senderIsOwnerByScope =
isInternalMessageChannel(ctx.Provider) &&

View File

@ -1,4 +1,10 @@
import {
buildConfiguredAcpSessionKey,
normalizeBindingConfig,
type ConfiguredAcpBindingChannel,
} from "../../acp/persistent-bindings.types.js";
import { resolveConfiguredBindingRecord } from "../../channels/plugins/binding-registry.js";
import { listAcpBindings } from "../../config/bindings.js";
import type { OpenClawConfig } from "../../config/config.js";
import { getSessionBindingService } from "../../infra/outbound/session-binding-service.js";
import { DEFAULT_ACCOUNT_ID, isAcpSessionKey } from "../../routing/session-key.js";
@ -7,6 +13,52 @@ function normalizeText(value: string | undefined | null): string {
return value?.trim() ?? "";
}
/**
 * Scan the configured ACP bindings and, for the first one matching this
 * channel/account/conversation, build the corresponding configured ACP
 * session key. Returns `undefined` when no binding matches.
 *
 * Matching rules:
 * - the binding's channel (lowercased) must equal `params.channel` exactly;
 * - the binding's account id must be blank, the "*" wildcard, or equal to
 *   `params.accountId`;
 * - the binding's peer id must name either the conversation itself or its
 *   parent conversation.
 */
function resolveRawConfiguredAcpSessionKey(params: {
  cfg: OpenClawConfig;
  channel: string;
  accountId: string;
  conversationId: string;
  parentConversationId?: string;
}): string | undefined {
  for (const binding of listAcpBindings(params.cfg)) {
    const channelKey = normalizeText(binding.match.channel).toLowerCase();
    if (!channelKey || channelKey !== params.channel) {
      continue;
    }
    const boundAccountId = normalizeText(binding.match.accountId);
    const accountMatches =
      !boundAccountId || boundAccountId === "*" || boundAccountId === params.accountId;
    if (!accountMatches) {
      continue;
    }
    // The peer id may refer to the conversation itself or to its parent thread.
    const peerId = normalizeText(binding.match.peer?.id);
    let conversationMatch: string | undefined;
    if (peerId === params.conversationId) {
      conversationMatch = params.conversationId;
    } else if (peerId && peerId === params.parentConversationId) {
      conversationMatch = params.parentConversationId;
    }
    if (!conversationMatch) {
      continue;
    }
    const acp = normalizeBindingConfig(binding.acp);
    // A concrete bound account id overrides the caller's; wildcard/blank keeps it.
    const useBoundAccount = Boolean(boundAccountId) && boundAccountId !== "*";
    return buildConfiguredAcpSessionKey({
      channel: params.channel as ConfiguredAcpBindingChannel,
      accountId: useBoundAccount ? boundAccountId : params.accountId,
      conversationId: conversationMatch,
      ...(params.parentConversationId ? { parentConversationId: params.parentConversationId } : {}),
      agentId: binding.agentId,
      mode: acp.mode === "oneshot" ? "oneshot" : "persistent",
      ...(acp.cwd ? { cwd: acp.cwd } : {}),
      ...(acp.backend ? { backend: acp.backend } : {}),
      ...(acp.label ? { label: acp.label } : {}),
    });
  }
  return undefined;
}
export function resolveEffectiveResetTargetSessionKey(params: {
cfg: OpenClawConfig;
channel?: string | null;
@ -68,6 +120,18 @@ export function resolveEffectiveResetTargetSessionKey(params: {
}
return isAcpSessionKey(configuredSessionKey) ? configuredSessionKey : undefined;
}
const rawConfiguredSessionKey = resolveRawConfiguredAcpSessionKey({
cfg: params.cfg,
channel,
accountId,
conversationId,
...(parentConversationId ? { parentConversationId } : {}),
});
if (rawConfiguredSessionKey) {
return rawConfiguredSessionKey;
}
if (params.fallbackToActiveAcpWhenUnbound === false) {
return undefined;
}

View File

@ -117,6 +117,17 @@ function resolveMentionPatterns(cfg: OpenClawConfig | undefined, agentId?: strin
return derived.length > 0 ? derived : [];
}
/**
 * Built-in mention-strip patterns for providers whose plugin supplies none.
 * Returns freshly constructed regexes on every call so the `g`-flag
 * `lastIndex` state is never shared between callers.
 */
function resolveFallbackProviderMentionStripRegexes(providerId?: string | null): RegExp[] {
  // Normalize the provider id so "Discord " and "discord" behave alike.
  const key = providerId?.trim().toLowerCase();
  if (key === "discord") {
    // Discord raw mentions look like <@123…> or <@!123…> (nickname form).
    return [/<@!?\d+>/gi];
  }
  if (key === "slack") {
    // Slack mentions are <@U…> tokens with no whitespace before the closing bracket.
    return [/<@[^>\s]+>/gi];
  }
  return [];
}
export function buildMentionRegexes(cfg: OpenClawConfig | undefined, agentId?: string): RegExp[] {
const patterns = normalizeMentionPatterns(resolveMentionPatterns(cfg, agentId));
return compileMentionPatternsCached({
@ -215,7 +226,9 @@ export function stripMentions(
cache: mentionStripRegexCompileCache,
warnRejected: false,
});
for (const re of [...configRegexes, ...providerRegexes]) {
const fallbackProviderRegexes =
providerRegexes.length > 0 ? [] : resolveFallbackProviderMentionStripRegexes(providerId);
for (const re of [...configRegexes, ...providerRegexes, ...fallbackProviderRegexes]) {
result = result.replace(re, " ");
}
if (providerMentions?.stripMentions) {

View File

@ -1846,7 +1846,7 @@ describe("persistSessionUsageUpdate", () => {
},
},
},
} as OpenClawConfig,
} satisfies OpenClawConfig,
usage: { input: 2_000, output: 500, cacheRead: 1_000, cacheWrite: 200 },
lastCallUsage: { input: 800, output: 200, cacheRead: 300, cacheWrite: 50 },
providerUsed: "openai",
@ -1892,7 +1892,7 @@ describe("persistSessionUsageUpdate", () => {
},
},
},
} as OpenClawConfig,
} satisfies OpenClawConfig,
usage: { input: 5_107, output: 1_827, cacheRead: 1_536, cacheWrite: 0 },
lastCallUsage: { input: 5_107, output: 1_827, cacheRead: 1_536, cacheWrite: 0 },
providerUsed: "openai-codex",

Some files were not shown because too many files have changed in this diff Show More