Merge branch 'main' into codex/cortex-openclaw-integration

This commit is contained in:
Marc J Saint-jour 2026-03-13 17:40:48 -04:00 committed by GitHub
commit b9fc0b94ca
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
472 changed files with 28179 additions and 14933 deletions

View File

@ -1,5 +1,11 @@
.git
.worktrees
# Sensitive files: docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN
# into the project root; keep it out of the build context.
.env
.env.*
.bun-cache
.bun
.tmp

View File

@ -7,7 +7,7 @@ on:
concurrency:
group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
cancel-in-progress: true
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
@ -38,9 +38,8 @@ jobs:
id: check
uses: ./.github/actions/detect-docs-changes
# Detect which heavy areas are touched so PRs can skip unrelated expensive jobs.
# Push to main keeps broad coverage, but this job still needs to run so
# downstream jobs that list it in `needs` are not skipped.
# Detect which heavy areas are touched so CI can skip unrelated expensive jobs.
# Fail-safe: if detection fails, downstream jobs run.
changed-scope:
needs: [docs-scope]
if: needs.docs-scope.outputs.docs_only != 'true'
@ -82,7 +81,7 @@ jobs:
# Build dist once for Node-relevant changes and share it with downstream jobs.
build-artifacts:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -141,7 +140,7 @@ jobs:
checks:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
strategy:
fail-fast: false
@ -149,6 +148,13 @@ jobs:
include:
- runtime: node
task: test
shard_index: 1
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node
task: test
shard_index: 2
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node
task: extensions
@ -160,40 +166,47 @@ jobs:
task: test
command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts
steps:
- name: Skip bun lane on push
if: github.event_name == 'push' && matrix.runtime == 'bun'
run: echo "Skipping bun test lane on push events."
- name: Skip bun lane on pull requests
if: github.event_name == 'pull_request' && matrix.runtime == 'bun'
run: echo "Skipping Bun compatibility lane on pull requests."
- name: Checkout
if: github.event_name != 'push' || matrix.runtime != 'bun'
if: github.event_name != 'pull_request' || matrix.runtime != 'bun'
uses: actions/checkout@v6
with:
submodules: false
- name: Setup Node environment
if: matrix.runtime != 'bun' || github.event_name != 'push'
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
uses: ./.github/actions/setup-node-env
with:
install-bun: "${{ matrix.runtime == 'bun' }}"
use-sticky-disk: "false"
- name: Configure Node test resources
if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
env:
SHARD_COUNT: ${{ matrix.shard_count || '' }}
SHARD_INDEX: ${{ matrix.shard_index || '' }}
run: |
# `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes.
# Default heap limits have been too low on Linux CI (V8 OOM near 4GB).
echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV"
if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then
echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV"
fi
- name: Run ${{ matrix.task }} (${{ matrix.runtime }})
if: matrix.runtime != 'bun' || github.event_name != 'push'
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
run: ${{ matrix.command }}
# Types, lint, and format check.
check:
name: "check"
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -239,7 +252,7 @@ jobs:
compat-node22:
name: "compat-node22"
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -272,7 +285,7 @@ jobs:
skills-python:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -365,7 +378,7 @@ jobs:
checks-windows:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true'
runs-on: blacksmith-32vcpu-windows-2025
timeout-minutes: 45
env:
@ -727,7 +740,7 @@ jobs:
android:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
strategy:
fail-fast: false
@ -747,23 +760,37 @@ jobs:
uses: actions/setup-java@v5
with:
distribution: temurin
# setup-android's sdkmanager currently crashes on JDK 21 in CI.
# Keep sdkmanager on the stable JDK path for Linux CI runners.
java-version: 17
- name: Setup Android SDK
uses: android-actions/setup-android@v3
with:
accept-android-sdk-licenses: false
- name: Setup Android SDK cmdline-tools
run: |
set -euo pipefail
ANDROID_SDK_ROOT="$HOME/.android-sdk"
CMDLINE_TOOLS_VERSION="12266719"
ARCHIVE="commandlinetools-linux-${CMDLINE_TOOLS_VERSION}_latest.zip"
URL="https://dl.google.com/android/repository/${ARCHIVE}"
mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools"
curl -fsSL "$URL" -o "/tmp/${ARCHIVE}"
rm -rf "$ANDROID_SDK_ROOT/cmdline-tools/latest"
unzip -q "/tmp/${ARCHIVE}" -d "$ANDROID_SDK_ROOT/cmdline-tools"
mv "$ANDROID_SDK_ROOT/cmdline-tools/cmdline-tools" "$ANDROID_SDK_ROOT/cmdline-tools/latest"
echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV"
echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV"
echo "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin" >> "$GITHUB_PATH"
echo "$ANDROID_SDK_ROOT/platform-tools" >> "$GITHUB_PATH"
- name: Setup Gradle
uses: gradle/actions/setup-gradle@v4
uses: gradle/actions/setup-gradle@v5
with:
gradle-version: 8.11.1
- name: Install Android SDK packages
run: |
yes | sdkmanager --licenses >/dev/null
sdkmanager --install \
yes | sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --licenses >/dev/null
sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --install \
"platform-tools" \
"platforms;android-36" \
"build-tools;36.0.0"

16
.jscpd.json Normal file
View File

@ -0,0 +1,16 @@
{
"gitignore": true,
"noSymlinks": true,
"ignore": [
"**/node_modules/**",
"**/dist/**",
"dist/**",
"**/.git/**",
"**/coverage/**",
"**/build/**",
"**/.build/**",
"**/.artifacts/**",
"docs/zh-CN/**",
"**/CHANGELOG.md"
]
}

View File

@ -201,6 +201,14 @@
## Agent-Specific Notes
- Vocabulary: "makeup" = "mac app".
- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested.
- Parallels macOS smoke playbook:
- `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`.
- Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed.
- Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`.
- For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green.
Don't run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially.
- Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading.
- Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`.
- When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`).
- Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`.

View File

@ -9,9 +9,13 @@ Docs: https://docs.openclaw.ai
- Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus.
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
- iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman.
- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome's own setup guides.
### Fixes
- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata.
- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang.
- Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc.
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding.
- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus.
@ -21,6 +25,7 @@ Docs: https://docs.openclaw.ai
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference.
- Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97.
- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart.
- Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179.
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec.
@ -28,10 +33,18 @@ Docs: https://docs.openclaw.ai
- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants.
- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token.
- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks.
- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins.
- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization.
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss.
- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han.
- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`.
## 2026.3.12
@ -44,6 +57,7 @@ Docs: https://docs.openclaw.ai
- Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi
- Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff
- Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc.
- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc.
### Fixes
@ -117,6 +131,7 @@ Docs: https://docs.openclaw.ai
- Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn.
- CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621.
- Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte.
- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh.
## 2026.3.11

View File

@ -45,8 +45,8 @@ enum ExecApprovalEvaluator {
let skillAllow: Bool
if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty {
let bins = await SkillBinsCache.shared.currentBins()
skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) }
let bins = await SkillBinsCache.shared.currentTrust()
skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins)
} else {
skillAllow = false
}
@ -65,4 +65,26 @@ enum ExecApprovalEvaluator {
allowlistMatch: allowlistSatisfied ? allowlistMatches.first : nil,
skillAllow: skillAllow)
}
static func isSkillAutoAllowed(
_ resolutions: [ExecCommandResolution],
trustedBinsByName: [String: Set<String>]) -> Bool
{
guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false }
return resolutions.allSatisfy { resolution in
guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName),
let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath)
else {
return false
}
return trustedBinsByName[executableName]?.contains(resolvedPath) == true
}
}
static func _testIsSkillAutoAllowed(
_ resolutions: [ExecCommandResolution],
trustedBinsByName: [String: Set<String>]) -> Bool
{
self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName)
}
}

View File

@ -777,6 +777,7 @@ actor SkillBinsCache {
static let shared = SkillBinsCache()
private var bins: Set<String> = []
private var trustByName: [String: Set<String>] = [:]
private var lastRefresh: Date?
private let refreshInterval: TimeInterval = 90
@ -787,27 +788,90 @@ actor SkillBinsCache {
return self.bins
}
func currentTrust(force: Bool = false) async -> [String: Set<String>] {
if force || self.isStale() {
await self.refresh()
}
return self.trustByName
}
func refresh() async {
do {
let report = try await GatewayConnection.shared.skillsStatus()
var next = Set<String>()
for skill in report.skills {
for bin in skill.requirements.bins {
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
if !trimmed.isEmpty { next.insert(trimmed) }
}
}
self.bins = next
let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths())
self.bins = trust.names
self.trustByName = trust.pathsByName
self.lastRefresh = Date()
} catch {
if self.lastRefresh == nil {
self.bins = []
self.trustByName = [:]
}
}
}
static func normalizeSkillBinName(_ value: String) -> String? {
let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased()
return trimmed.isEmpty ? nil : trimmed
}
static func normalizeResolvedPath(_ value: String?) -> String? {
let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
guard !trimmed.isEmpty else { return nil }
return URL(fileURLWithPath: trimmed).standardizedFileURL.path
}
static func buildTrustIndex(
report: SkillsStatusReport,
searchPaths: [String]) -> SkillBinTrustIndex
{
var names = Set<String>()
var pathsByName: [String: Set<String>] = [:]
for skill in report.skills {
for bin in skill.requirements.bins {
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { continue }
names.insert(trimmed)
guard let name = self.normalizeSkillBinName(trimmed),
let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths),
let normalizedPath = self.normalizeResolvedPath(resolvedPath)
else {
continue
}
var paths = pathsByName[name] ?? Set<String>()
paths.insert(normalizedPath)
pathsByName[name] = paths
}
}
return SkillBinTrustIndex(names: names, pathsByName: pathsByName)
}
private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? {
let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin
if expanded.contains("/") || expanded.contains("\\") {
return FileManager().isExecutableFile(atPath: expanded) ? expanded : nil
}
return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths)
}
private func isStale() -> Bool {
guard let lastRefresh else { return true }
return Date().timeIntervalSince(lastRefresh) > self.refreshInterval
}
static func _testBuildTrustIndex(
report: SkillsStatusReport,
searchPaths: [String]) -> SkillBinTrustIndex
{
self.buildTrustIndex(report: report, searchPaths: searchPaths)
}
}
struct SkillBinTrustIndex {
let names: Set<String>
let pathsByName: [String: Set<String>]
}

View File

@ -37,8 +37,7 @@ struct ExecCommandResolution {
var resolutions: [ExecCommandResolution] = []
resolutions.reserveCapacity(segments.count)
for segment in segments {
guard let token = self.parseFirstToken(segment),
let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: env)
guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env)
else {
return []
}
@ -88,6 +87,20 @@ struct ExecCommandResolution {
cwd: cwd)
}
private static func resolveShellSegmentExecutable(
_ segment: String,
cwd: String?,
env: [String: String]?) -> ExecCommandResolution?
{
let tokens = self.tokenizeShellWords(segment)
guard !tokens.isEmpty else { return nil }
let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens)
guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else {
return nil
}
return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env)
}
private static func parseFirstToken(_ command: String) -> String? {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return nil }
@ -102,6 +115,59 @@ struct ExecCommandResolution {
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
}
private static func tokenizeShellWords(_ command: String) -> [String] {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return [] }
var tokens: [String] = []
var current = ""
var inSingle = false
var inDouble = false
var escaped = false
func appendCurrent() {
guard !current.isEmpty else { return }
tokens.append(current)
current.removeAll(keepingCapacity: true)
}
for ch in trimmed {
if escaped {
current.append(ch)
escaped = false
continue
}
if ch == "\\", !inSingle {
escaped = true
continue
}
if ch == "'", !inDouble {
inSingle.toggle()
continue
}
if ch == "\"", !inSingle {
inDouble.toggle()
continue
}
if ch.isWhitespace, !inSingle, !inDouble {
appendCurrent()
continue
}
current.append(ch)
}
if escaped {
current.append("\\")
}
appendCurrent()
return tokens
}
private enum ShellTokenContext {
case unquoted
case doubleQuoted
@ -148,8 +214,14 @@ struct ExecCommandResolution {
while idx < chars.count {
let ch = chars[idx]
let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil
let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle)
if escaped {
if ch == "\n" {
escaped = false
idx += 1
continue
}
current.append(ch)
escaped = false
idx += 1
@ -157,6 +229,10 @@ struct ExecCommandResolution {
}
if ch == "\\", !inSingle {
if next == "\n" {
idx += 2
continue
}
current.append(ch)
escaped = true
idx += 1
@ -177,7 +253,7 @@ struct ExecCommandResolution {
continue
}
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) {
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) {
// Fail closed on command/process substitution in allowlist mode,
// including command substitution inside double-quoted shell strings.
return nil
@ -201,6 +277,25 @@ struct ExecCommandResolution {
return segments
}
private static func nextShellSignificantCharacter(
chars: [Character],
after idx: Int,
inSingle: Bool) -> Character?
{
guard !inSingle else {
return idx + 1 < chars.count ? chars[idx + 1] : nil
}
var cursor = idx + 1
while cursor < chars.count {
if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" {
cursor += 2
continue
}
return chars[cursor]
}
return nil
}
private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool {
let context: ShellTokenContext = inDouble ? .doubleQuoted : .unquoted
guard let rules = self.shellFailClosedRules[context] else {

View File

@ -141,6 +141,26 @@ struct ExecAllowlistTests {
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on line-continued command substitution`() {
let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on chained line-continued command substitution`() {
let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on quoted backticks`() {
let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""]
let resolutions = ExecCommandResolution.resolveForAllowlist(
@ -208,6 +228,30 @@ struct ExecAllowlistTests {
#expect(resolutions[1].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() {
let command = ["/bin/sh", "-lc", "env /usr/bin/touch /tmp/openclaw-allowlist-test"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "env /usr/bin/touch /tmp/openclaw-allowlist-test",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.count == 1)
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
#expect(resolutions[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env assignments inside shell segments`() {
let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.count == 1)
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
#expect(resolutions[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env to effective direct executable`() {
let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"]
let resolutions = ExecCommandResolution.resolveForAllowlist(

View File

@ -0,0 +1,90 @@
import Foundation
import Testing
@testable import OpenClaw
struct ExecSkillBinTrustTests {
@Test func `build trust index resolves skill bin paths`() throws {
let fixture = try Self.makeExecutable(named: "jq")
defer { try? FileManager.default.removeItem(at: fixture.root) }
let trust = SkillBinsCache._testBuildTrustIndex(
report: Self.makeReport(bins: ["jq"]),
searchPaths: [fixture.root.path])
#expect(trust.names == ["jq"])
#expect(trust.pathsByName["jq"] == [fixture.path])
}
@Test func `skill auto allow accepts trusted resolved skill bin path`() throws {
let fixture = try Self.makeExecutable(named: "jq")
defer { try? FileManager.default.removeItem(at: fixture.root) }
let trust = SkillBinsCache._testBuildTrustIndex(
report: Self.makeReport(bins: ["jq"]),
searchPaths: [fixture.root.path])
let resolution = ExecCommandResolution(
rawExecutable: "jq",
resolvedPath: fixture.path,
executableName: "jq",
cwd: nil)
#expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
}
@Test func `skill auto allow rejects same basename at different path`() throws {
let trusted = try Self.makeExecutable(named: "jq")
let untrusted = try Self.makeExecutable(named: "jq")
defer {
try? FileManager.default.removeItem(at: trusted.root)
try? FileManager.default.removeItem(at: untrusted.root)
}
let trust = SkillBinsCache._testBuildTrustIndex(
report: Self.makeReport(bins: ["jq"]),
searchPaths: [trusted.root.path])
let resolution = ExecCommandResolution(
rawExecutable: "jq",
resolvedPath: untrusted.path,
executableName: "jq",
cwd: nil)
#expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
}
private static func makeExecutable(named name: String) throws -> (root: URL, path: String) {
let root = FileManager.default.temporaryDirectory
.appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true)
try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true)
let file = root.appendingPathComponent(name)
try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8)
try FileManager.default.setAttributes(
[.posixPermissions: NSNumber(value: Int16(0o755))],
ofItemAtPath: file.path)
return (root, file.path)
}
/// Builds a minimal SkillsStatusReport containing one fully-eligible skill
/// that requires exactly the given binaries (and nothing else).
private static func makeReport(bins: [String]) -> SkillsStatusReport {
	// Single synthetic skill; only `requirements.bins` varies between tests.
	let skill = SkillStatus(
		name: "test-skill",
		description: "test",
		source: "local",
		filePath: "/tmp/skills/test-skill/SKILL.md",
		baseDir: "/tmp/skills/test-skill",
		skillKey: "test-skill",
		primaryEnv: nil,
		emoji: nil,
		homepage: nil,
		always: false,
		disabled: false,
		eligible: true,
		requirements: SkillRequirements(bins: bins, env: [], config: []),
		missing: SkillMissing(bins: [], env: [], config: []),
		configChecks: [],
		install: [])
	return SkillsStatusReport(
		workspaceDir: "/tmp/workspace",
		managedSkillsDir: "/tmp/skills",
		skills: [skill])
}
}

View File

@ -73,7 +73,7 @@ await web_search({
## Notes
- OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits.
- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
- Each Brave plan includes **\$5/month in free credit** (renewing). The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
- The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service).
- Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`).

View File

@ -218,6 +218,55 @@ For actions/directory reads, user token can be preferred when configured. For wr
- if encoded option values exceed Slack limits, the flow falls back to buttons
- For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value.
## Interactive replies
Slack can render agent-authored interactive reply controls, but this feature is disabled by default.
Enable it globally:
```json5
{
channels: {
slack: {
capabilities: {
interactiveReplies: true,
},
},
},
}
```
Or enable it for one Slack account only:
```json5
{
channels: {
slack: {
accounts: {
ops: {
capabilities: {
interactiveReplies: true,
},
},
},
},
},
}
```
When enabled, agents can emit Slack-only reply directives:
- `[[slack_buttons: Approve:approve, Reject:reject]]`
- `[[slack_select: Choose a target | Canary:canary, Production:production]]`
These directives compile into Slack Block Kit and route clicks or selections back through the existing Slack interaction event path.
Notes:
- This is Slack-specific UI. Other channels do not translate Slack Block Kit directives into their own button systems.
- The interactive callback values are OpenClaw-generated opaque tokens, not raw agent-authored values.
- If generated interactive blocks would exceed Slack Block Kit limits, OpenClaw falls back to the original text reply instead of sending an invalid blocks payload.
Default slash command settings:
- `enabled: false`

View File

@ -9,32 +9,32 @@ read_when:
# CI Pipeline
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed.
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed.
## Job Overview
| Job | Purpose | When it runs |
| ----------------- | ------------------------------------------------------- | ------------------------------------------------- |
| `docs-scope` | Detect docs-only changes | Always |
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs |
| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes |
| `check-docs` | Markdown lint + broken link check | Docs changed |
| `code-analysis` | LOC threshold check (1000 lines) | PRs only |
| `secrets` | Detect leaked secrets | Always |
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes |
| `release-check` | Validate npm pack contents | After build |
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes |
| Job | Purpose | When it runs |
| ----------------- | ------------------------------------------------------- | ---------------------------------- |
| `docs-scope` | Detect docs-only changes | Always |
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes |
| `check` | TypeScript types, lint, format | Non-docs, node changes |
| `check-docs` | Markdown lint + broken link check | Docs changed |
| `secrets` | Detect leaked secrets | Always |
| `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes |
| `release-check` | Validate npm pack contents | Pushes to `main` after build |
| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes |
| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes |
## Fail-Fast Order
Jobs are ordered so cheap checks fail before expensive ones run:
1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min)
2. `build-artifacts` (blocked on above)
3. `checks`, `checks-windows`, `macos`, `android` (blocked on build)
1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first)
2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22`
Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`.

View File

@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first.
Under the hood, requests are executed as a normal Gateway agent run (same codepath as
`openclaw agent`), so routing/permissions/config match your Gateway.
## Authentication
## Authentication, security, and routing
Uses the Gateway auth configuration. Send a bearer token:
Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api):
- `Authorization: Bearer <token>`
- use `Authorization: Bearer <token>` with the normal Gateway auth config
- treat the endpoint as full operator access for the gateway instance
- select agents with `model: "openclaw:<agentId>"`, `model: "agent:<agentId>"`, or `x-openclaw-agent-id`
- use `x-openclaw-session-key` for explicit session routing
Notes:
- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`).
- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`).
- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`.
## Security boundary (important)
Treat this endpoint as a **full operator-access** surface for the gateway instance.
- HTTP bearer auth here is not a narrow per-user scope model.
- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential.
- Requests run through the same control-plane agent path as trusted operator actions.
- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway.
- If the target agent policy allows sensitive tools, this endpoint can use them.
- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet.
See [Security](/gateway/security) and [Remote access](/gateway/remote).
## Choosing an agent
No custom headers required: encode the agent id in the OpenResponses `model` field:
- `model: "openclaw:<agentId>"` (example: `"openclaw:main"`, `"openclaw:beta"`)
- `model: "agent:<agentId>"` (alias)
Or target a specific OpenClaw agent by header:
- `x-openclaw-agent-id: <agentId>` (default: `main`)
Advanced:
- `x-openclaw-session-key: <sessionKey>` to fully control session routing.
## Enabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `true`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: true },
},
},
},
}
```
## Disabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `false`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: false },
},
},
},
}
```
Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`.
## Session behavior

View File

@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost):
- No real keys required
- Should be fast and stable
- Pool note:
- OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards.
- On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`).
- OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards.
- On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there.
- Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`).
### E2E (gateway smoke)

View File

@ -0,0 +1,138 @@
---
summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts"
read_when:
- You are deploying OpenClaw on a cloud VM with Docker
- You need the shared binary bake, persistence, and update flow
title: "Docker VM Runtime"
---
# Docker VM Runtime
Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers.
## Bake required binaries into the image
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
## Build and launch
```bash
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory.
Use a larger machine class before retrying.
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
Verify Gateway:
```bash
docker compose logs -f openclaw-gateway
```
Expected output:
```
[gateway] listening on ws://0.0.0.0:18789
```
## What persists where
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
## Updates
To update OpenClaw on the VM:
```bash
git pull
docker compose build
docker compose up -d
```

View File

@ -281,77 +281,20 @@ services:
---
## 10) Bake required binaries into the image (critical)
## 10) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
Use the shared runtime guide for the common Docker host flow:
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
- [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
---
## 11) Build and launch
## 11) GCP-specific launch notes
```bash
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing:
@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins
If you changed the gateway port, replace `18789` with your configured port.
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 12) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
---
## 13) Access from your laptop
## 12) Access from your laptop
Create an SSH tunnel to forward the Gateway port:
@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list
docker compose run --rm openclaw-cli devices approve <requestId>
```
---
## What persists where (source of truth)
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
## Updates
To update OpenClaw on the VM:
```bash
cd ~/openclaw
git pull
docker compose build
docker compose up -d
```
Need the shared persistence and update reference again?
See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates).
---

View File

@ -202,107 +202,20 @@ services:
---
## 7) Bake required binaries into the image (critical)
## 7) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
Use the shared runtime guide for the common Docker host flow:
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
- [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
---
## 8) Build and launch
## 8) Hetzner-specific access
```bash
docker compose build
docker compose up -d openclaw-gateway
```
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 9) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
From your laptop:
After the shared build and launch steps, tunnel from your laptop:
```bash
ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP
@ -316,25 +229,7 @@ Paste your gateway token.
---
## What persists where (source of truth)
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where).
## Infrastructure as Code (Terraform)

View File

@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set:
}
```
`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin
normalizes the provider-supplied `From` value and compares it to `allowFrom`.
Webhook verification authenticates provider delivery and payload integrity, but
it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as
caller-ID filtering, not strong caller identity.
Auto-responses use the agent system. Tune with:
- `responseModel`

View File

@ -85,8 +85,8 @@ See [Memory](/concepts/memory).
- **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey`
- **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey`
**Brave Search free credit:** Each Brave plan includes $5/month in renewing
free credit. The Search plan costs $5 per 1,000 requests, so the credit covers
**Brave Search free credit:** Each Brave plan includes \$5/month in renewing
free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers
1,000 requests/month at no charge. Set your usage limit in the Brave dashboard
to avoid unexpected charges.

View File

@ -11,7 +11,7 @@ title: "Tests"
- `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests don't collide with a running instance. Use this when a prior gateway run left port 18789 occupied.
- `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic.
- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
- `pnpm test`: runs the fast core unit lane by default for quick local feedback.
- `pnpm test:channels`: runs channel-heavy suites.
- `pnpm test:extensions`: runs extension/plugin suites.

View File

@ -167,93 +167,8 @@ openclaw onboard --non-interactive \
`--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts.
</Note>
<AccordionGroup>
<Accordion title="Gemini example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice gemini-api-key \
--gemini-api-key "$GEMINI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Z.AI example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice zai-api-key \
--zai-api-key "$ZAI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Vercel AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ai-gateway-api-key \
--ai-gateway-api-key "$AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Cloudflare AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice cloudflare-ai-gateway-api-key \
--cloudflare-ai-gateway-account-id "your-account-id" \
--cloudflare-ai-gateway-gateway-id "your-gateway-id" \
--cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Moonshot example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice moonshot-api-key \
--moonshot-api-key "$MOONSHOT_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Synthetic example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice synthetic-api-key \
--synthetic-api-key "$SYNTHETIC_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="OpenCode example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice opencode-zen \
--opencode-zen-api-key "$OPENCODE_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog.
</Accordion>
<Accordion title="Ollama example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ollama \
--custom-model-id "qwen3.5:27b" \
--accept-risk \
--gateway-port 18789 \
--gateway-bind loopback
```
Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance.
</Accordion>
</AccordionGroup>
Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples).
Use this reference page for flag semantics and step ordering.
### Add agent (non-interactive)

View File

@ -48,6 +48,8 @@ Gateway.
- `openclaw`: managed, isolated browser (no extension required).
- `chrome`: extension relay to your **system browser** (requires the OpenClaw
extension to be attached to a tab).
- `existing-session`: official Chrome MCP attach flow for a running Chrome
profile.
Set `browser.defaultProfile: "openclaw"` if you want managed mode by default.
@ -77,6 +79,12 @@ Browser settings live in `~/.openclaw/openclaw.json`.
profiles: {
openclaw: { cdpPort: 18800, color: "#FF4500" },
work: { cdpPort: 18801, color: "#0066CC" },
chromeLive: {
cdpPort: 18802,
driver: "existing-session",
attachOnly: true,
color: "#00AA00",
},
remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" },
},
},
@ -100,6 +108,8 @@ Notes:
- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay.
- Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary.
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP.
- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do
not set `cdpUrl` for that driver.
## Use Brave (or another Chromium-based browser)
@ -264,11 +274,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be:
- **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port
- **remote**: an explicit CDP URL (Chromium-based browser running elsewhere)
- **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension
- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect
Defaults:
- The `openclaw` profile is auto-created if missing.
- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default).
- Existing-session profiles are opt-in; create them with `--driver existing-session`.
- Local CDP ports allocate from **1880018899** by default.
- Deleting a profile moves its local data directory to Trash.
@ -328,6 +340,66 @@ Notes:
- This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions).
- Detach by clicking the extension icon again.
## Chrome existing-session via MCP
OpenClaw can also attach to a running Chrome profile through the official
Chrome DevTools MCP server. This reuses the tabs and login state already open in
that Chrome profile.
Official background and setup references:
- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp)
Create a profile:
```bash
openclaw browser create-profile \
--name chrome-live \
--driver existing-session \
--color "#00AA00"
```
Then in Chrome:
1. Open `chrome://inspect/#remote-debugging`
2. Enable remote debugging
3. Keep Chrome running and approve the connection prompt when OpenClaw attaches
Live attach smoke test:
```bash
openclaw browser --browser-profile chrome-live start
openclaw browser --browser-profile chrome-live status
openclaw browser --browser-profile chrome-live tabs
openclaw browser --browser-profile chrome-live snapshot --format ai
```
What success looks like:
- `status` shows `driver: existing-session`
- `status` shows `running: true`
- `tabs` lists your already-open Chrome tabs
- `snapshot` returns refs from the selected live tab
What to check if attach does not work:
- Chrome is version `144+`
- remote debugging is enabled at `chrome://inspect/#remote-debugging`
- Chrome showed the attach consent prompt and you accepted it
- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect`
Notes:
- This path is higher-risk than the isolated `openclaw` profile because it can
act inside your signed-in browser session.
- OpenClaw does not launch Chrome for this driver; it attaches to an existing
session only.
- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not
the legacy default-profile remote debugging port workflow.
- Some features still require the extension relay or managed browser path, such
as PDF export and download interception.
- Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated.
WSL2 / cross-namespace example:

View File

@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs
Attach/detach happens via a **single Chrome toolbar button**.
If you want Chrome's official DevTools MCP attach flow instead of the OpenClaw
extension relay, use an `existing-session` browser profile instead. See
[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome's own
setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your
browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp).
## What it is (concept)
There are three parts:

View File

@ -65,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi
2. In the dashboard, choose the **Search** plan and generate an API key.
3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment.
Each Brave plan includes **$5/month in free credit** (renewing). The Search
plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
Each Brave plan includes **\$5/month in free credit** (renewing). The Search
plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
your usage limit in the Brave dashboard to avoid unexpected charges. See the
[Brave API portal](https://brave.com/search/api/) for current plans and
pricing.

View File

@ -54,6 +54,49 @@ describe("acpx ensure", () => {
}
});
/**
 * Queues the three sequential spawn results of a successful install flow:
 * stale version probe ("acpx 0.0.9"), npm install, then a re-probe that
 * reports the pinned version.
 */
function mockEnsureInstallFlow() {
  const flowResults = [
    { stdout: "acpx 0.0.9\n", stderr: "", code: 0, error: null },
    { stdout: "added 1 package\n", stderr: "", code: 0, error: null },
    { stdout: `acpx ${ACPX_PINNED_VERSION}\n`, stderr: "", code: 0, error: null },
  ];
  for (const result of flowResults) {
    spawnAndCollectMock.mockResolvedValueOnce(result);
  }
}
/**
 * Asserts the probe → install → re-probe spawn sequence produced by
 * ensureAcpx, optionally checking that stripProviderAuthEnvVars was
 * threaded through every call.
 */
function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) {
  // The first and third calls are identical version probes.
  const versionProbe = {
    command: "/plugin/node_modules/.bin/acpx",
    args: ["--version"],
    cwd: "/plugin",
    stripProviderAuthEnvVars,
  };
  const installCall = {
    command: "npm",
    args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
    cwd: "/plugin",
    stripProviderAuthEnvVars,
  };
  const expectedSequence = [versionProbe, installCall, versionProbe];
  expectedSequence.forEach((shape, index) => {
    expect(spawnAndCollectMock.mock.calls[index]?.[0]).toMatchObject(shape);
  });
}
it("accepts the pinned acpx version", async () => {
spawnAndCollectMock.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
@ -177,25 +220,7 @@ describe("acpx ensure", () => {
});
it("installs and verifies pinned acpx when precheck fails", async () => {
spawnAndCollectMock
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
mockEnsureInstallFlow();
await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx",
@ -204,33 +229,11 @@ describe("acpx ensure", () => {
});
expect(spawnAndCollectMock).toHaveBeenCalledTimes(3);
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
});
expectEnsureInstallCalls();
});
it("threads stripProviderAuthEnvVars through version probes and install", async () => {
spawnAndCollectMock
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
mockEnsureInstallFlow();
await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx",
@ -239,24 +242,7 @@ describe("acpx ensure", () => {
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expectEnsureInstallCalls(true);
});
it("fails with actionable error when npm install fails", async () => {

View File

@ -1,9 +1,7 @@
import crypto from "node:crypto";
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { afterEach, describe, expect, it, vi } from "vitest";
import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js";
import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js";
const probeFeishuMock = vi.hoisted(() => vi.fn());
@ -23,61 +21,6 @@ vi.mock("./runtime.js", () => createFeishuRuntimeMockModule());
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
async function getFreePort(): Promise<number> {
const server = createServer();
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
const address = server.address() as AddressInfo | null;
if (!address) {
throw new Error("missing server address");
}
await new Promise<void>((resolve) => server.close(() => resolve()));
return address.port;
}
async function waitUntilServerReady(url: string): Promise<void> {
for (let i = 0; i < 50; i += 1) {
try {
const response = await fetch(url, { method: "GET" });
if (response.status >= 200 && response.status < 500) {
return;
}
} catch {
// retry
}
await new Promise((resolve) => setTimeout(resolve, 20));
}
throw new Error(`server did not start: ${url}`);
}
function buildConfig(params: {
accountId: string;
path: string;
port: number;
verificationToken?: string;
encryptKey?: string;
}): ClawdbotConfig {
return {
channels: {
feishu: {
enabled: true,
accounts: {
[params.accountId]: {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "webhook",
webhookHost: "127.0.0.1",
webhookPort: params.port,
webhookPath: params.path,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
},
},
},
},
} as ClawdbotConfig;
}
function signFeishuPayload(params: {
encryptKey: string;
payload: Record<string, unknown>;
@ -107,43 +50,6 @@ function encryptFeishuPayload(encryptKey: string, payload: Record<string, unknow
return Buffer.concat([iv, encrypted]).toString("base64");
}
async function withRunningWebhookMonitor(
params: {
accountId: string;
path: string;
verificationToken: string;
encryptKey: string;
},
run: (url: string) => Promise<void>,
) {
const port = await getFreePort();
const cfg = buildConfig({
accountId: params.accountId,
path: params.path,
port,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
});
const abortController = new AbortController();
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
const monitorPromise = monitorFeishuProvider({
config: cfg,
runtime,
abortSignal: abortController.signal,
});
const url = `http://127.0.0.1:${port}${params.path}`;
await waitUntilServerReady(url);
try {
await run(url);
} finally {
abortController.abort();
await monitorPromise;
}
}
afterEach(() => {
stopFeishuMonitor();
});
@ -159,6 +65,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = { type: "url_verification", challenge: "challenge-token" };
const response = await fetch(url, {
@ -185,6 +92,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const response = await fetch(url, {
method: "POST",
@ -208,6 +116,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const response = await fetch(url, {
method: "POST",
@ -231,6 +140,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = { type: "url_verification", challenge: "challenge-token" };
const response = await fetch(url, {
@ -255,6 +165,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = {
schema: "2.0",
@ -283,6 +194,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = {
encrypt: encryptFeishuPayload("encrypt_key", {

View File

@ -1,11 +1,13 @@
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { afterEach, describe, expect, it, vi } from "vitest";
import {
createFeishuClientMockModule,
createFeishuRuntimeMockModule,
} from "./monitor.test-mocks.js";
import {
buildWebhookConfig,
getFreePort,
withRunningWebhookMonitor,
} from "./monitor.webhook.test-helpers.js";
const probeFeishuMock = vi.hoisted(() => vi.fn());
@ -33,98 +35,6 @@ import {
stopFeishuMonitor,
} from "./monitor.js";
async function getFreePort(): Promise<number> {
const server = createServer();
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
const address = server.address() as AddressInfo | null;
if (!address) {
throw new Error("missing server address");
}
await new Promise<void>((resolve) => server.close(() => resolve()));
return address.port;
}
async function waitUntilServerReady(url: string): Promise<void> {
for (let i = 0; i < 50; i += 1) {
try {
const response = await fetch(url, { method: "GET" });
if (response.status >= 200 && response.status < 500) {
return;
}
} catch {
// retry
}
await new Promise((resolve) => setTimeout(resolve, 20));
}
throw new Error(`server did not start: ${url}`);
}
function buildConfig(params: {
accountId: string;
path: string;
port: number;
verificationToken?: string;
encryptKey?: string;
}): ClawdbotConfig {
return {
channels: {
feishu: {
enabled: true,
accounts: {
[params.accountId]: {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "webhook",
webhookHost: "127.0.0.1",
webhookPort: params.port,
webhookPath: params.path,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
},
},
},
},
} as ClawdbotConfig;
}
async function withRunningWebhookMonitor(
params: {
accountId: string;
path: string;
verificationToken: string;
encryptKey: string;
},
run: (url: string) => Promise<void>,
) {
const port = await getFreePort();
const cfg = buildConfig({
accountId: params.accountId,
path: params.path,
port,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
});
const abortController = new AbortController();
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
const monitorPromise = monitorFeishuProvider({
config: cfg,
runtime,
abortSignal: abortController.signal,
});
const url = `http://127.0.0.1:${port}${params.path}`;
await waitUntilServerReady(url);
try {
await run(url);
} finally {
abortController.abort();
await monitorPromise;
}
}
afterEach(() => {
clearFeishuWebhookRateLimitStateForTest();
stopFeishuMonitor();
@ -134,7 +44,7 @@ describe("Feishu webhook security hardening", () => {
it("rejects webhook mode without verificationToken", async () => {
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
const cfg = buildConfig({
const cfg = buildWebhookConfig({
accountId: "missing-token",
path: "/hook-missing-token",
port: await getFreePort(),
@ -148,7 +58,7 @@ describe("Feishu webhook security hardening", () => {
it("rejects webhook mode without encryptKey", async () => {
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
const cfg = buildConfig({
const cfg = buildWebhookConfig({
accountId: "missing-encrypt-key",
path: "/hook-missing-encrypt",
port: await getFreePort(),
@ -167,6 +77,7 @@ describe("Feishu webhook security hardening", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const response = await fetch(url, {
method: "POST",
@ -189,6 +100,7 @@ describe("Feishu webhook security hardening", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
let saw429 = false;
for (let i = 0; i < 130; i += 1) {

View File

@ -0,0 +1,98 @@
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { vi } from "vitest";
import type { monitorFeishuProvider } from "./monitor.js";
/**
 * Reserves an ephemeral port by binding a throwaway server on 127.0.0.1,
 * then closing it and returning the kernel-assigned port.
 *
 * NOTE(review): the port is only *likely* to remain free — another process
 * could grab it between close and reuse. Acceptable for tests.
 */
export async function getFreePort(): Promise<number> {
  const probe = createServer();
  await new Promise<void>((resolve) => {
    probe.listen(0, "127.0.0.1", () => resolve());
  });
  const addr = probe.address() as AddressInfo | null;
  if (addr === null) {
    throw new Error("missing server address");
  }
  const { port } = addr;
  await new Promise<void>((resolve) => {
    probe.close(() => resolve());
  });
  return port;
}
/**
 * Polls `url` with GET until it answers with any non-5xx status, retrying
 * up to 50 times with a 20ms pause; throws if the server never comes up.
 */
async function waitUntilServerReady(url: string): Promise<void> {
  const maxAttempts = 50;
  const retryDelayMs = 20;
  for (let attempt = 0; attempt < maxAttempts; attempt += 1) {
    let ready = false;
    try {
      const response = await fetch(url, { method: "GET" });
      ready = response.status >= 200 && response.status < 500;
    } catch {
      // Connection refused — the server is not listening yet; retry below.
    }
    if (ready) {
      return;
    }
    await new Promise((resolve) => setTimeout(resolve, retryDelayMs));
  }
  throw new Error(`server did not start: ${url}`);
}
/**
 * Builds a minimal config with a single webhook-mode Feishu account bound
 * to 127.0.0.1 on the given port/path. verificationToken/encryptKey are
 * passed through unchanged (may be undefined to exercise rejection paths).
 */
export function buildWebhookConfig(params: {
  accountId: string;
  path: string;
  port: number;
  verificationToken?: string;
  encryptKey?: string;
}): ClawdbotConfig {
  const account = {
    enabled: true,
    appId: "cli_test",
    appSecret: "secret_test", // pragma: allowlist secret
    connectionMode: "webhook",
    webhookHost: "127.0.0.1",
    webhookPort: params.port,
    webhookPath: params.path,
    encryptKey: params.encryptKey,
    verificationToken: params.verificationToken,
  };
  // Cast: only the fields the webhook monitor reads are populated here.
  return {
    channels: {
      feishu: {
        enabled: true,
        accounts: { [params.accountId]: account },
      },
    },
  } as ClawdbotConfig;
}
/**
 * Boots the given webhook monitor on a free local port, waits until the
 * webhook endpoint answers, runs `run` against its URL, and always tears
 * the monitor down (abort + await) afterwards — even if `run` throws.
 */
export async function withRunningWebhookMonitor(
  params: {
    accountId: string;
    path: string;
    verificationToken: string;
    encryptKey: string;
  },
  monitor: typeof monitorFeishuProvider,
  run: (url: string) => Promise<void>,
) {
  const webhookPort = await getFreePort();
  const config = buildWebhookConfig({
    accountId: params.accountId,
    path: params.path,
    port: webhookPort,
    encryptKey: params.encryptKey,
    verificationToken: params.verificationToken,
  });
  const controller = new AbortController();
  const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
  const monitorDone = monitor({
    config,
    runtime,
    abortSignal: controller.signal,
  });
  const webhookUrl = `http://127.0.0.1:${webhookPort}${params.path}`;
  await waitUntilServerReady(webhookUrl);
  try {
    await run(webhookUrl);
  } finally {
    controller.abort();
    await monitorDone;
  }
}

View File

@ -7,6 +7,9 @@
"dependencies": {
"google-auth-library": "^10.6.1"
},
"devDependencies": {
"openclaw": "workspace:*"
},
"peerDependencies": {
"openclaw": ">=2026.3.11"
},

View File

@ -27,6 +27,28 @@ function createMockFetch(response?: { status?: number; body?: unknown; contentTy
return { mockFetch: mockFetch as unknown as typeof fetch, calls };
}
/** Builds a Mattermost client wired to a capturing mock fetch; returns the client plus the recorded calls. */
function createTestClient(response?: { status?: number; body?: unknown; contentType?: string }) {
  const { mockFetch, calls } = createMockFetch(response);
  return {
    client: createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    }),
    calls,
  };
}
/**
 * Runs updateMattermostPost("post1", update) against a mock client and
 * returns the captured fetch calls plus the parsed first request body.
 */
async function updatePostAndCapture(
  update: Parameters<typeof updateMattermostPost>[2],
  response?: { status?: number; body?: unknown; contentType?: string },
) {
  const fixture = createTestClient(response ?? { body: { id: "post1" } });
  await updateMattermostPost(fixture.client, "post1", update);
  const rawBody = fixture.calls[0].init?.body as string;
  return {
    calls: fixture.calls,
    body: JSON.parse(rawBody) as Record<string, unknown>,
  };
}
// ── normalizeMattermostBaseUrl ────────────────────────────────────────
describe("normalizeMattermostBaseUrl", () => {
@ -229,68 +251,38 @@ describe("createMattermostPost", () => {
describe("updateMattermostPost", () => {
it("sends PUT to /posts/{id}", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", { message: "Updated" });
const { calls } = await updatePostAndCapture({ message: "Updated" });
expect(calls[0].url).toContain("/posts/post1");
expect(calls[0].init?.method).toBe("PUT");
});
it("includes post id in the body", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", { message: "Updated" });
const body = JSON.parse(calls[0].init?.body as string);
const { body } = await updatePostAndCapture({ message: "Updated" });
expect(body.id).toBe("post1");
expect(body.message).toBe("Updated");
});
it("includes props for button completion updates", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", {
const { body } = await updatePostAndCapture({
message: "Original message",
props: {
attachments: [{ text: "✓ **do_now** selected by @tony" }],
},
});
const body = JSON.parse(calls[0].init?.body as string);
expect(body.message).toBe("Original message");
expect(body.props.attachments[0].text).toContain("✓");
expect(body.props.attachments[0].text).toContain("do_now");
expect(body.props).toMatchObject({
attachments: [{ text: expect.stringContaining("✓") }],
});
expect(body.props).toMatchObject({
attachments: [{ text: expect.stringContaining("do_now") }],
});
});
it("omits message when not provided", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", {
const { body } = await updatePostAndCapture({
props: { attachments: [] },
});
const body = JSON.parse(calls[0].init?.body as string);
expect(body.id).toBe("post1");
expect(body.message).toBeUndefined();
expect(body.props).toEqual({ attachments: [] });

View File

@ -496,6 +496,104 @@ describe("createMattermostInteractionHandler", () => {
return res as unknown as ServerResponse & { headers: Record<string, string>; body: string };
}
/** Builds an interaction context signed for account "acct" and returns both the context and its token. */
function createActionContext(actionId = "approve", channelId = "chan-1") {
  const context = {
    action_id: actionId,
    __openclaw_channel_id: channelId,
  };
  const token = generateInteractionToken(context, "acct");
  return { context, token };
}
/**
 * Builds a Mattermost interaction callback payload. user/channel/post ids
 * default to the fixture values; user_name is only present when provided.
 */
function createInteractionBody(params: {
  context: Record<string, unknown>;
  token: string;
  channelId?: string;
  postId?: string;
  userId?: string;
  userName?: string;
}) {
  const optionalUserName = params.userName ? { user_name: params.userName } : {};
  return {
    user_id: params.userId ?? "user-1",
    ...optionalUserName,
    channel_id: params.channelId ?? "chan-1",
    post_id: params.postId ?? "post-1",
    // The token travels inside the context under the reserved _token key.
    context: { ...params.context, _token: params.token },
  };
}
/** Feeds a synthetic request through the interaction handler and returns the captured response. */
async function runHandler(
  handler: ReturnType<typeof createMattermostInteractionHandler>,
  params: {
    body: unknown;
    remoteAddress?: string;
    headers?: Record<string, string>;
  },
) {
  const { body, remoteAddress, headers } = params;
  const request = createReq({ remoteAddress, headers, body });
  const response = createRes();
  await handler(request, response);
  return response;
}
/** Asserts the handler rejected the request: HTTP 403 with the reason in the body. */
function expectForbiddenResponse(
  res: ServerResponse & { body: string },
  expectedMessage: string,
) {
  const { statusCode, body } = res;
  expect(statusCode).toBe(403);
  expect(body).toContain(expectedMessage);
}
/**
 * Asserts a 200 "{}" response and — when a request log is supplied — that
 * the handler fetched the post then issued a PUT update to it.
 */
function expectSuccessfulApprovalUpdate(
  res: ServerResponse & { body: string },
  requestLog?: Array<{ path: string; method?: string }>,
) {
  expect(res.statusCode).toBe(200);
  expect(res.body).toBe("{}");
  if (!requestLog) {
    return;
  }
  expect(requestLog).toEqual([
    { path: "/posts/post-1", method: undefined },
    { path: "/posts/post-1", method: "PUT" },
  ]);
}
/**
 * Builds a fixture post carrying a single interactive button action.
 * root_id is only present when rootId is supplied (threaded post).
 */
function createActionPost(params?: {
  actionId?: string;
  actionName?: string;
  channelId?: string;
  rootId?: string;
}): MattermostPost {
  const action = {
    id: params?.actionId ?? "approve",
    name: params?.actionName ?? "Approve",
  };
  return {
    id: "post-1",
    channel_id: params?.channelId ?? "chan-1",
    ...(params?.rootId ? { root_id: params.rootId } : {}),
    message: "Choose",
    props: {
      attachments: [{ actions: [action] }],
    },
  };
}
/** Builds a handler whose client must never be exercised — used by tests that expect early rejection. */
function createUnusedInteractionHandler() {
  const client = {
    request: async () => ({ message: "unused" }),
  } as unknown as MattermostClient;
  return createMattermostInteractionHandler({
    client,
    botUserId: "bot",
    accountId: "acct",
  });
}
async function runApproveInteraction(params?: {
actionName?: string;
allowedSourceIps?: string[];
@ -503,8 +601,7 @@ describe("createMattermostInteractionHandler", () => {
remoteAddress?: string;
headers?: Record<string, string>;
}) {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const requestLog: Array<{ path: string; method?: string }> = [];
const handler = createMattermostInteractionHandler({
client: {
@ -513,15 +610,7 @@ describe("createMattermostInteractionHandler", () => {
if (init?.method === "PUT") {
return { id: "post-1" };
}
return {
channel_id: "chan-1",
message: "Choose",
props: {
attachments: [
{ actions: [{ id: "approve", name: params?.actionName ?? "Approve" }] },
],
},
};
return createActionPost({ actionName: params?.actionName });
},
} as unknown as MattermostClient,
botUserId: "bot",
@ -530,50 +619,27 @@ describe("createMattermostInteractionHandler", () => {
trustedProxies: params?.trustedProxies,
});
const req = createReq({
const res = await runHandler(handler, {
remoteAddress: params?.remoteAddress,
headers: params?.headers,
body: {
user_id: "user-1",
user_name: "alice",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
body: createInteractionBody({ context, token, userName: "alice" }),
});
const res = createRes();
await handler(req, res);
return { res, requestLog };
}
async function runInvalidActionRequest(actionId: string) {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({
channel_id: "chan-1",
message: "Choose",
props: {
attachments: [{ actions: [{ id: actionId, name: actionId }] }],
},
}),
request: async () => createActionPost({ actionId, actionName: actionId }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
});
const req = createReq({
body: {
user_id: "user-1",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
return await runHandler(handler, {
body: createInteractionBody({ context, token }),
});
const res = createRes();
await handler(req, res);
return res;
}
it("accepts callback requests from an allowlisted source IP", async () => {
@ -582,12 +648,7 @@ describe("createMattermostInteractionHandler", () => {
remoteAddress: "198.51.100.8",
});
expect(res.statusCode).toBe(200);
expect(res.body).toBe("{}");
expect(requestLog).toEqual([
{ path: "/posts/post-1", method: undefined },
{ path: "/posts/post-1", method: "PUT" },
]);
expectSuccessfulApprovalUpdate(res, requestLog);
});
it("accepts forwarded Mattermost source IPs from a trusted proxy", async () => {
@ -603,8 +664,7 @@ describe("createMattermostInteractionHandler", () => {
});
it("rejects callback requests from non-allowlisted source IPs", async () => {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const handler = createMattermostInteractionHandler({
client: {
request: async () => {
@ -616,33 +676,17 @@ describe("createMattermostInteractionHandler", () => {
allowedSourceIps: ["127.0.0.1"],
});
const req = createReq({
const res = await runHandler(handler, {
remoteAddress: "198.51.100.8",
body: {
user_id: "user-1",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
body: createInteractionBody({ context, token }),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Forbidden origin");
expectForbiddenResponse(res, "Forbidden origin");
});
it("rejects requests with an invalid interaction token", async () => {
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({ message: "unused" }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
});
const handler = createUnusedInteractionHandler();
const req = createReq({
const res = await runHandler(handler, {
body: {
user_id: "user-1",
channel_id: "chan-1",
@ -650,72 +694,33 @@ describe("createMattermostInteractionHandler", () => {
context: { action_id: "approve", _token: "deadbeef" },
},
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Invalid token");
expectForbiddenResponse(res, "Invalid token");
});
it("rejects requests when the signed channel does not match the callback payload", async () => {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({ message: "unused" }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
const { context, token } = createActionContext();
const handler = createUnusedInteractionHandler();
const res = await runHandler(handler, {
body: createInteractionBody({ context, token, channelId: "chan-2" }),
});
const req = createReq({
body: {
user_id: "user-1",
channel_id: "chan-2",
post_id: "post-1",
context: { ...context, _token: token },
},
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Channel mismatch");
expectForbiddenResponse(res, "Channel mismatch");
});
it("rejects requests when the fetched post does not belong to the callback channel", async () => {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({
channel_id: "chan-9",
message: "Choose",
props: {
attachments: [{ actions: [{ id: "approve", name: "Approve" }] }],
},
}),
request: async () => createActionPost({ channelId: "chan-9" }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
});
const req = createReq({
body: {
user_id: "user-1",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
const res = await runHandler(handler, {
body: createInteractionBody({ context, token }),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Post/channel mismatch");
expectForbiddenResponse(res, "Post/channel mismatch");
});
it("rejects requests when the action is not present on the fetched post", async () => {
@ -730,12 +735,7 @@ describe("createMattermostInteractionHandler", () => {
actionName: "approve",
});
expect(res.statusCode).toBe(200);
expect(res.body).toBe("{}");
expect(requestLog).toEqual([
{ path: "/posts/post-1", method: undefined },
{ path: "/posts/post-1", method: "PUT" },
]);
expectSuccessfulApprovalUpdate(res, requestLog);
});
it("forwards fetched post threading metadata to session and button callbacks", async () => {
@ -745,19 +745,10 @@ describe("createMattermostInteractionHandler", () => {
enqueueSystemEvent,
},
} as unknown as Parameters<typeof setMattermostRuntime>[0]);
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const resolveSessionKey = vi.fn().mockResolvedValue("session:thread:root-9");
const dispatchButtonClick = vi.fn();
const fetchedPost: MattermostPost = {
id: "post-1",
channel_id: "chan-1",
root_id: "root-9",
message: "Choose",
props: {
attachments: [{ actions: [{ id: "approve", name: "Approve" }] }],
},
};
const fetchedPost = createActionPost({ rootId: "root-9" });
const handler = createMattermostInteractionHandler({
client: {
request: async (_path: string, init?: { method?: string }) =>
@ -769,19 +760,9 @@ describe("createMattermostInteractionHandler", () => {
dispatchButtonClick,
});
const req = createReq({
body: {
user_id: "user-1",
user_name: "alice",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
const res = await runHandler(handler, {
body: createInteractionBody({ context, token, userName: "alice" }),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(200);
expect(resolveSessionKey).toHaveBeenCalledWith({
channelId: "chan-1",
@ -803,8 +784,7 @@ describe("createMattermostInteractionHandler", () => {
});
it("lets a custom interaction handler short-circuit generic completion updates", async () => {
const context = { action_id: "mdlprov", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext("mdlprov");
const requestLog: Array<{ path: string; method?: string }> = [];
const handleInteraction = vi.fn().mockResolvedValue({
ephemeral_text: "Only the original requester can use this picker.",
@ -814,14 +794,10 @@ describe("createMattermostInteractionHandler", () => {
client: {
request: async (path: string, init?: { method?: string }) => {
requestLog.push({ path, method: init?.method });
return {
id: "post-1",
channel_id: "chan-1",
message: "Choose",
props: {
attachments: [{ actions: [{ id: "mdlprov", name: "Browse providers" }] }],
},
};
return createActionPost({
actionId: "mdlprov",
actionName: "Browse providers",
});
},
} as unknown as MattermostClient,
botUserId: "bot",
@ -830,18 +806,14 @@ describe("createMattermostInteractionHandler", () => {
dispatchButtonClick,
});
const req = createReq({
body: {
user_id: "user-2",
user_name: "alice",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
const res = await runHandler(handler, {
body: createInteractionBody({
context,
token,
userId: "user-2",
userName: "alice",
}),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(200);
expect(res.body).toBe(

View File

@ -16,6 +16,35 @@ const accountFixture: ResolvedMattermostAccount = {
config: {},
};
/**
 * Runs the group-command authorization check for `senderId` against an
 * allowlist that only contains "trusted-user", in a public channel with
 * access groups enabled.
 */
function authorizeGroupCommand(senderId: string) {
  const account = {
    ...accountFixture,
    config: {
      groupPolicy: "allowlist",
      allowFrom: ["trusted-user"],
    },
  };
  const channelInfo = {
    id: "chan-1",
    type: "O",
    name: "general",
    display_name: "General",
  };
  return authorizeMattermostCommandInvocation({
    account,
    cfg: { commands: { useAccessGroups: true } },
    senderId,
    senderName: senderId,
    channelId: "chan-1",
    channelInfo,
    storeAllowFrom: [],
    allowTextCommands: true,
    hasControlCommand: true,
  });
}
describe("mattermost monitor authz", () => {
it("keeps DM allowlist merged with pairing-store entries", () => {
const resolved = resolveMattermostEffectiveAllowFromLists({
@ -72,32 +101,7 @@ describe("mattermost monitor authz", () => {
});
it("denies group control commands when the sender is outside the allowlist", () => {
const decision = authorizeMattermostCommandInvocation({
account: {
...accountFixture,
config: {
groupPolicy: "allowlist",
allowFrom: ["trusted-user"],
},
},
cfg: {
commands: {
useAccessGroups: true,
},
},
senderId: "attacker",
senderName: "attacker",
channelId: "chan-1",
channelInfo: {
id: "chan-1",
type: "O",
name: "general",
display_name: "General",
},
storeAllowFrom: [],
allowTextCommands: true,
hasControlCommand: true,
});
const decision = authorizeGroupCommand("attacker");
expect(decision).toMatchObject({
ok: false,
@ -107,32 +111,7 @@ describe("mattermost monitor authz", () => {
});
it("authorizes group control commands for allowlisted senders", () => {
const decision = authorizeMattermostCommandInvocation({
account: {
...accountFixture,
config: {
groupPolicy: "allowlist",
allowFrom: ["trusted-user"],
},
},
cfg: {
commands: {
useAccessGroups: true,
},
},
senderId: "trusted-user",
senderName: "trusted-user",
channelId: "chan-1",
channelInfo: {
id: "chan-1",
type: "O",
name: "general",
display_name: "General",
},
storeAllowFrom: [],
allowTextCommands: true,
hasControlCommand: true,
});
const decision = authorizeGroupCommand("trusted-user");
expect(decision).toMatchObject({
ok: true,

View File

@ -14,6 +14,28 @@ describe("mattermost reactions", () => {
resetMattermostReactionBotUserCacheForTests();
});
/**
 * Add the canonical test reaction (:thumbsup: on POST1) through
 * addMattermostReaction, routing network calls to the supplied fetch mock.
 */
async function addReactionWithFetch(
  fetchMock: ReturnType<typeof createMattermostReactionFetchMock>,
) {
  const fetchImpl = fetchMock as unknown as typeof fetch;
  return addMattermostReaction({
    cfg: createMattermostTestConfig(),
    postId: "POST1",
    emojiName: "thumbsup",
    fetchImpl,
  });
}
/**
 * Remove the canonical test reaction (:thumbsup: on POST1) through
 * removeMattermostReaction, routing network calls to the supplied fetch mock.
 */
async function removeReactionWithFetch(
  fetchMock: ReturnType<typeof createMattermostReactionFetchMock>,
) {
  const fetchImpl = fetchMock as unknown as typeof fetch;
  return removeMattermostReaction({
    cfg: createMattermostTestConfig(),
    postId: "POST1",
    emojiName: "thumbsup",
    fetchImpl,
  });
}
it("adds reactions by calling /users/me then POST /reactions", async () => {
const fetchMock = createMattermostReactionFetchMock({
mode: "add",
@ -21,12 +43,7 @@ describe("mattermost reactions", () => {
emojiName: "thumbsup",
});
const result = await addMattermostReaction({
cfg: createMattermostTestConfig(),
postId: "POST1",
emojiName: "thumbsup",
fetchImpl: fetchMock as unknown as typeof fetch,
});
const result = await addReactionWithFetch(fetchMock);
expect(result).toEqual({ ok: true });
expect(fetchMock).toHaveBeenCalled();
@ -41,12 +58,7 @@ describe("mattermost reactions", () => {
body: { id: "err", message: "boom" },
});
const result = await addMattermostReaction({
cfg: createMattermostTestConfig(),
postId: "POST1",
emojiName: "thumbsup",
fetchImpl: fetchMock as unknown as typeof fetch,
});
const result = await addReactionWithFetch(fetchMock);
expect(result.ok).toBe(false);
if (!result.ok) {
@ -61,12 +73,7 @@ describe("mattermost reactions", () => {
emojiName: "thumbsup",
});
const result = await removeMattermostReaction({
cfg: createMattermostTestConfig(),
postId: "POST1",
emojiName: "thumbsup",
fetchImpl: fetchMock as unknown as typeof fetch,
});
const result = await removeReactionWithFetch(fetchMock);
expect(result).toEqual({ ok: true });
expect(fetchMock).toHaveBeenCalled();

View File

@ -10,6 +10,25 @@ import {
} from "./slash-commands.js";
describe("slash-commands", () => {
/**
 * Register exactly one slash command (/oc_status, auto-completing) via
 * registerSlashCommands, with the given request stub standing in for the
 * Mattermost client transport.
 */
async function registerSingleStatusCommand(
  request: (path: string, init?: { method?: string }) => Promise<unknown>,
) {
  const statusCommand = {
    trigger: "oc_status",
    description: "status",
    autoComplete: true,
  };
  return registerSlashCommands({
    client: { request } as unknown as MattermostClient,
    teamId: "team-1",
    creatorUserId: "bot-user",
    callbackUrl: "http://gateway/callback",
    commands: [statusCommand],
  });
}
it("parses application/x-www-form-urlencoded payloads", () => {
const payload = parseSlashCommandPayload(
"token=t1&team_id=team&channel_id=ch1&user_id=u1&command=%2Foc_status&text=now",
@ -101,21 +120,7 @@ describe("slash-commands", () => {
}
throw new Error(`unexpected request path: ${path}`);
});
const client = { request } as unknown as MattermostClient;
const result = await registerSlashCommands({
client,
teamId: "team-1",
creatorUserId: "bot-user",
callbackUrl: "http://gateway/callback",
commands: [
{
trigger: "oc_status",
description: "status",
autoComplete: true,
},
],
});
const result = await registerSingleStatusCommand(request);
expect(result).toHaveLength(1);
expect(result[0]?.managed).toBe(false);
@ -144,21 +149,7 @@ describe("slash-commands", () => {
}
throw new Error(`unexpected request path: ${path}`);
});
const client = { request } as unknown as MattermostClient;
const result = await registerSlashCommands({
client,
teamId: "team-1",
creatorUserId: "bot-user",
callbackUrl: "http://gateway/callback",
commands: [
{
trigger: "oc_status",
description: "status",
autoComplete: true,
},
],
});
const result = await registerSingleStatusCommand(request);
expect(result).toHaveLength(0);
expect(request).toHaveBeenCalledTimes(1);

View File

@ -58,6 +58,23 @@ const accountFixture: ResolvedMattermostAccount = {
config: {},
};
/**
 * Build a slash-command HTTP handler over the shared account fixture, drive a
 * single request (POST by default) through it, and return the captured
 * response wrapper for assertions.
 */
async function runSlashRequest(params: {
  commandTokens: Set<string>;
  body: string;
  method?: string;
}) {
  const { commandTokens, body, method } = params;
  const handler = createSlashCommandHttpHandler({
    account: accountFixture,
    cfg: {} as OpenClawConfig,
    runtime: {} as RuntimeEnv,
    commandTokens,
  });
  const response = createResponse();
  await handler(createRequest({ method, body }), response.res);
  return response;
}
describe("slash-http", () => {
it("rejects non-POST methods", async () => {
const handler = createSlashCommandHttpHandler({
@ -93,36 +110,20 @@ describe("slash-http", () => {
});
it("fails closed when no command tokens are registered", async () => {
const handler = createSlashCommandHttpHandler({
account: accountFixture,
cfg: {} as OpenClawConfig,
runtime: {} as RuntimeEnv,
const response = await runSlashRequest({
commandTokens: new Set<string>(),
});
const req = createRequest({
body: "token=tok1&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=",
});
const response = createResponse();
await handler(req, response.res);
expect(response.res.statusCode).toBe(401);
expect(response.getBody()).toContain("Unauthorized: invalid command token.");
});
it("rejects unknown command tokens", async () => {
const handler = createSlashCommandHttpHandler({
account: accountFixture,
cfg: {} as OpenClawConfig,
runtime: {} as RuntimeEnv,
const response = await runSlashRequest({
commandTokens: new Set(["known-token"]),
});
const req = createRequest({
body: "token=unknown&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=",
});
const response = createResponse();
await handler(req, response.res);
expect(response.res.statusCode).toBe(401);
expect(response.getBody()).toContain("Unauthorized: invalid command token.");

View File

@ -4,6 +4,9 @@
"private": true,
"description": "OpenClaw core memory search plugin",
"type": "module",
"devDependencies": {
"openclaw": "workspace:*"
},
"peerDependencies": {
"openclaw": ">=2026.3.11"
},

View File

@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from One Thousand and One Night
| `prompt` | `command` | What is commanded of the djinn |
| `model` | `spirit` | Which spirit answers |
### Unchanged
### Shared appendix
These keywords already work or are too functional to replace sensibly:
Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern.
- `**...**` discretion markers — already work
- `until`, `while` — already work
- `map`, `filter`, `reduce`, `pmap` — pipeline operators
- `max` — constraint modifier
- `as` — aliasing
- Model names: `sonnet`, `opus`, `haiku` — already poetic
Recommended Arabian Nights rewrite targets:
---
## Side-by-Side Comparison
### Simple Program
```prose
# Functional
use "@alice/research" as research
input topic: "What to investigate"
agent helper:
model: sonnet
let findings = session: helper
prompt: "Research {topic}"
output summary = session "Summarize"
context: findings
```
```prose
# Nights
conjure "@alice/research" as research
wish topic: "What to investigate"
djinn helper:
spirit: sonnet
name findings = tale: helper
command: "Research {topic}"
gift summary = tale "Summarize"
scroll: findings
```
### Parallel Execution
```prose
# Functional
parallel:
security = session "Check security"
perf = session "Check performance"
style = session "Check style"
session "Synthesize review"
context: { security, perf, style }
```
```prose
# Nights
bazaar:
security = tale "Check security"
perf = tale "Check performance"
style = tale "Check style"
tale "Synthesize review"
scroll: { security, perf, style }
```
### Loop with Condition
```prose
# Functional
loop until **the code is bug-free** (max: 5):
session "Find and fix bugs"
```
```prose
# Nights
telling until **the code is bug-free** (max: 5):
tale "Find and fix bugs"
```
### Error Handling
```prose
# Functional
try:
session "Risky operation"
catch as err:
session "Handle error"
context: err
finally:
session "Cleanup"
```
```prose
# Nights
venture:
tale "Risky operation"
should misfortune strike as err:
tale "Handle error"
scroll: err
and so it was:
tale "Cleanup"
```
### Choice Block
```prose
# Functional
choice **the severity level**:
option "Critical":
session "Escalate immediately"
option "Minor":
session "Log for later"
```
```prose
# Nights
crossroads **the severity level**:
path "Critical":
tale "Escalate immediately"
path "Minor":
tale "Log for later"
```
### Conditionals
```prose
# Functional
if **has security issues**:
session "Fix security"
elif **has performance issues**:
session "Optimize"
else:
session "Approve"
```
- `session` sample -> `tale`
- `parallel` sample -> `bazaar`
- `loop` sample -> `telling`
- `try/catch/finally` sample -> `venture` / `should misfortune strike` / `and so it was`
- `choice` sample -> `crossroads` / `path`
```prose
# Nights

View File

@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from Greek epic poetry—the Il
| `prompt` | `charge` | The quest given |
| `model` | `muse` | Which muse inspires |
### Unchanged
### Shared appendix
These keywords already work or are too functional to replace sensibly:
Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern.
- `**...**` discretion markers — already work
- `until`, `while` — already work
- `map`, `filter`, `reduce`, `pmap` — pipeline operators
- `max` — constraint modifier
- `as` — aliasing
- Model names: `sonnet`, `opus`, `haiku` — already poetic
Recommended Homeric rewrite targets:
---
## Side-by-Side Comparison
### Simple Program
```prose
# Functional
use "@alice/research" as research
input topic: "What to investigate"
agent helper:
model: sonnet
let findings = session: helper
prompt: "Research {topic}"
output summary = session "Summarize"
context: findings
```
```prose
# Homeric
invoke "@alice/research" as research
omen topic: "What to investigate"
hero helper:
muse: sonnet
decree findings = trial: helper
charge: "Research {topic}"
glory summary = trial "Summarize"
tidings: findings
```
### Parallel Execution
```prose
# Functional
parallel:
security = session "Check security"
perf = session "Check performance"
style = session "Check style"
session "Synthesize review"
context: { security, perf, style }
```
```prose
# Homeric
host:
security = trial "Check security"
perf = trial "Check performance"
style = trial "Check style"
trial "Synthesize review"
tidings: { security, perf, style }
```
### Loop with Condition
```prose
# Functional
loop until **the code is bug-free** (max: 5):
session "Find and fix bugs"
```
```prose
# Homeric
ordeal until **the code is bug-free** (max: 5):
trial "Find and fix bugs"
```
### Error Handling
```prose
# Functional
try:
session "Risky operation"
catch as err:
session "Handle error"
context: err
finally:
session "Cleanup"
```
```prose
# Homeric
venture:
trial "Risky operation"
should ruin come as err:
trial "Handle error"
tidings: err
in the end:
trial "Cleanup"
```
### Choice Block
```prose
# Functional
choice **the severity level**:
option "Critical":
session "Escalate immediately"
option "Minor":
session "Log for later"
```
```prose
# Homeric
crossroads **the severity level**:
path "Critical":
trial "Escalate immediately"
path "Minor":
trial "Log for later"
```
### Conditionals
```prose
# Functional
if **has security issues**:
session "Fix security"
elif **has performance issues**:
session "Optimize"
else:
session "Approve"
```
- `session` sample -> `trial`
- `parallel` sample -> `host`
- `loop` sample -> `ordeal`
- `try/catch/finally` sample -> `venture` / `should ruin come` / `in the end`
- `choice` sample -> `crossroads` / `path`
```prose
# Homeric

View File

@ -0,0 +1,35 @@
---
role: reference
summary: Shared appendix for experimental OpenProse alternate registers.
status: draft
requires: prose.md
---
# OpenProse Alternate Register Appendix
Use this appendix with experimental register files such as `arabian-nights.md` and `homer.md`.
## Unchanged keywords
These keywords already work or are too functional to replace sensibly:
- `**...**` discretion markers
- `until`, `while`
- `map`, `filter`, `reduce`, `pmap`
- `max`
- `as`
- model names such as `sonnet`, `opus`, and `haiku`
## Comparison pattern
Use the translation map in each register file to rewrite the same functional sample programs:
- simple program
- parallel execution
- loop with condition
- error handling
- choice block
- conditionals
The goal is consistency, not one canonical wording.
Keep the functional version intact and rewrite only the register-specific aliases.

View File

@ -87,71 +87,28 @@ The `agents` and `agent_segments` tables for project-scoped agents live in `.pro
## Responsibility Separation
This section defines **who does what**. This is the contract between the VM and subagents.
The VM/subagent contract matches [postgres.md](./postgres.md#responsibility-separation).
### VM Responsibilities
SQLite-specific differences:
The VM (the orchestrating agent running the .prose program) is responsible for:
- the VM creates `state.db` instead of an `openprose` schema
- subagent confirmation messages point at a local database path, for example `.prose/runs/<runId>/state.db`
- cleanup is typically `VACUUM` or file deletion rather than dropping schema objects
| Responsibility | Description |
| ------------------------- | -------------------------------------------------------------------------------------------------------- |
| **Database creation** | Create `state.db` and initialize core tables at run start |
| **Program registration** | Store the program source and metadata |
| **Execution tracking** | Update position, status, and timing as statements execute |
| **Subagent spawning** | Spawn sessions via Task tool with database path and instructions |
| **Parallel coordination** | Track branch status, implement join strategies |
| **Loop management** | Track iteration counts, evaluate conditions |
| **Error aggregation** | Record failures, manage retry state |
| **Context preservation** | Maintain sufficient narration in the main conversation thread so execution can be understood and resumed |
| **Completion detection** | Mark the run as complete when finished |
Example return values:
**Critical:** The VM must preserve enough context in its own conversation to understand execution state without re-reading the entire database. The database is for coordination and persistence, not a replacement for working memory.
### Subagent Responsibilities
Subagents (sessions spawned by the VM) are responsible for:
| Responsibility | Description |
| ----------------------- | ----------------------------------------------------------------- |
| **Writing own outputs** | Insert/update their binding in the `bindings` table |
| **Memory management** | For persistent agents: read and update their memory record |
| **Segment recording** | For persistent agents: append segment history |
| **Attachment handling** | Write large outputs to `attachments/` directory, store path in DB |
| **Atomic writes** | Use transactions when updating multiple related records |
**Critical:** Subagents write ONLY to `bindings`, `agents`, and `agent_segments` tables. The VM owns the `execution` table entirely. Completion signaling happens through the substrate (Task tool return), not database updates.
**Critical:** Subagents must write their outputs directly to the database. The VM does not write subagent outputs—it only reads them after the subagent completes.
**What subagents return to the VM:** A confirmation message with the binding location—not the full content:
**Root scope:**
```
```text
Binding written: research
Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='research', execution_id=NULL)
Summary: AI safety research covering alignment, robustness, and interpretability with 15 citations.
```
**Inside block invocation:**
```
```text
Binding written: result
Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='result', execution_id=43)
Execution ID: 43
Summary: Processed chunk into 3 sub-parts for recursive processing.
```
The VM tracks locations, not values. This keeps the VM's context lean and enables arbitrarily large intermediate values.
### Shared Concerns
| Concern | Who Handles |
| ---------------- | ------------------------------------------------------------------ |
| Schema evolution | Either (use `CREATE TABLE IF NOT EXISTS`, `ALTER TABLE` as needed) |
| Custom tables | Either (prefix with `x_` for extensions) |
| Indexing | Either (add indexes for frequently-queried columns) |
| Cleanup | VM (at run end, optionally vacuum) |
The VM still tracks locations, not full values.
---

View File

@ -137,6 +137,46 @@ describe("slackPlugin outbound", () => {
});
});
describe("slackPlugin agentPrompt", () => {
it("tells agents interactive replies are disabled by default", () => {
const hints = slackPlugin.agentPrompt?.messageToolHints?.({
cfg: {
channels: {
slack: {
botToken: "xoxb-test",
appToken: "xapp-test",
},
},
},
});
expect(hints).toEqual([
"- Slack interactive replies are disabled. If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts.<account>.capabilities`).",
]);
});
it("shows Slack interactive reply directives when enabled", () => {
const hints = slackPlugin.agentPrompt?.messageToolHints?.({
cfg: {
channels: {
slack: {
botToken: "xoxb-test",
appToken: "xapp-test",
capabilities: { interactiveReplies: true },
},
},
},
});
expect(hints).toContain(
"- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.",
);
expect(hints).toContain(
"- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.",
);
});
});
describe("slackPlugin config", () => {
it("treats HTTP mode accounts with bot token + signing secret as configured", async () => {
const cfg: OpenClawConfig = {

View File

@ -29,6 +29,7 @@ import {
resolveDefaultSlackAccountId,
resolveSlackAccount,
resolveSlackReplyToMode,
isSlackInteractiveRepliesEnabled,
resolveSlackGroupRequireMention,
resolveSlackGroupToolPolicy,
buildSlackThreadingToolContext,
@ -146,6 +147,17 @@ export const slackPlugin: ChannelPlugin<ResolvedSlackAccount> = {
media: true,
nativeCommands: true,
},
agentPrompt: {
messageToolHints: ({ cfg, accountId }) =>
isSlackInteractiveRepliesEnabled({ cfg, accountId })
? [
"- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.",
"- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.",
]
: [
"- Slack interactive replies are disabled. If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts.<account>.capabilities`).",
],
},
streaming: {
blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 },
},

View File

@ -45,6 +45,27 @@ describe("uploadImageFromUrl", () => {
});
}
/**
 * Arrange a successful image fetch: loads the upload mocks, wires the fetch
 * mock to serve a fake blob for the (defaulted) source URL and content type,
 * and optionally makes uploadFile resolve to a fixed URL.
 * Returns the blob plus the mocks each test still needs to inspect.
 */
async function setupSuccessfulUpload(params?: {
  sourceUrl?: string;
  contentType?: string;
  uploadedUrl?: string;
}) {
  const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
  const contentType = params?.contentType ?? "image/png";
  const finalUrl = params?.sourceUrl ?? "https://example.com/image.png";
  const mockBlob = new Blob(["fake-image"], { type: contentType });
  mockSuccessfulFetch({ mockFetch, blob: mockBlob, finalUrl, contentType });
  if (params?.uploadedUrl) {
    mockUploadFile.mockResolvedValue({ url: params.uploadedUrl });
  }
  return { mockBlob, mockUploadFile, uploadImageFromUrl };
}
beforeEach(() => {
vi.clearAllMocks();
});
@ -54,16 +75,9 @@ describe("uploadImageFromUrl", () => {
});
it("fetches image and calls uploadFile, returns uploaded URL", async () => {
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
const mockBlob = new Blob(["fake-image"], { type: "image/png" });
mockSuccessfulFetch({
mockFetch,
blob: mockBlob,
finalUrl: "https://example.com/image.png",
contentType: "image/png",
const { mockBlob, mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload({
uploadedUrl: "https://memex.tlon.network/uploaded.png",
});
mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" });
const result = await uploadImageFromUrl("https://example.com/image.png");
@ -95,15 +109,7 @@ describe("uploadImageFromUrl", () => {
});
it("returns original URL if upload fails", async () => {
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
const mockBlob = new Blob(["fake-image"], { type: "image/png" });
mockSuccessfulFetch({
mockFetch,
blob: mockBlob,
finalUrl: "https://example.com/image.png",
contentType: "image/png",
});
const { mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload();
mockUploadFile.mockRejectedValue(new Error("Upload failed"));
const result = await uploadImageFromUrl("https://example.com/image.png");

View File

@ -89,56 +89,18 @@ Notes:
- Twilio/Telnyx/Plivo require a **publicly reachable** webhook URL.
- `mock` is a local dev provider (no network calls).
- Telnyx requires `telnyx.publicKey` (or `TELNYX_PUBLIC_KEY`) unless `skipSignatureVerification` is true.
- `tunnel.allowNgrokFreeTierLoopbackBypass: true` allows Twilio webhooks with invalid signatures **only** when `tunnel.provider="ngrok"` and `serve.bind` is loopback (ngrok local agent). Use for local dev only.
Streaming security defaults:
- `streaming.preStartTimeoutMs` closes sockets that never send a valid `start` frame.
- `streaming.maxPendingConnections` caps total unauthenticated pre-start sockets.
- `streaming.maxPendingConnectionsPerIp` caps unauthenticated pre-start sockets per source IP.
- `streaming.maxConnections` caps total open media stream sockets (pending + active).
- advanced webhook, streaming, and tunnel notes: `https://docs.openclaw.ai/plugins/voice-call`
## Stale call reaper
Use `staleCallReaperSeconds` to end calls that never receive a terminal webhook
(for example, notify-mode calls that never complete). The default is `0`
(disabled).
Recommended ranges:
- **Production:** `120`–`300` seconds for notify-style flows.
- Keep this value **higher than `maxDurationSeconds`** so normal calls can
finish. A good starting point is `maxDurationSeconds` plus 30–60 seconds.
Example:
```json5
{
staleCallReaperSeconds: 360,
}
```
See the plugin docs for recommended ranges and production examples:
`https://docs.openclaw.ai/plugins/voice-call#stale-call-reaper`
## TTS for calls
Voice Call uses the core `messages.tts` configuration (OpenAI or ElevenLabs) for
streaming speech on calls. You can override it under the plugin config with the
same shape — overrides deep-merge with `messages.tts`.
```json5
{
tts: {
provider: "openai",
openai: {
voice: "alloy",
},
},
}
```
Notes:
- Edge TTS is ignored for voice calls (telephony audio needs PCM; Edge output is unreliable).
- Core TTS is used when Twilio media streaming is enabled; otherwise calls fall back to provider native voices.
streaming speech on calls. Override examples and provider caveats live here:
`https://docs.openclaw.ai/plugins/voice-call#tts-for-calls`
## CLI

View File

@ -9,121 +9,87 @@ import {
} from "./manager.test-harness.js";
describe("CallManager verification on restore", () => {
it("skips stale calls reported terminal by provider", async () => {
async function initializeManager(params?: {
callOverrides?: Parameters<typeof makePersistedCall>[0];
providerResult?: FakeProvider["getCallStatusResult"];
configureProvider?: (provider: FakeProvider) => void;
configOverrides?: Partial<{ maxDurationSeconds: number }>;
}) {
const storePath = createTestStorePath();
const call = makePersistedCall();
const call = makePersistedCall(params?.callOverrides);
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
provider.getCallStatusResult = { status: "completed", isTerminal: true };
if (params?.providerResult) {
provider.getCallStatusResult = params.providerResult;
}
params?.configureProvider?.(provider);
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
...params?.configOverrides,
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
return { call, manager };
}
it("skips stale calls reported terminal by provider", async () => {
const { manager } = await initializeManager({
providerResult: { status: "completed", isTerminal: true },
});
expect(manager.getActiveCalls()).toHaveLength(0);
});
it("keeps calls reported active by provider", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall();
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
provider.getCallStatusResult = { status: "in-progress", isTerminal: false };
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { call, manager } = await initializeManager({
providerResult: { status: "in-progress", isTerminal: false },
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(1);
expect(manager.getActiveCalls()[0]?.callId).toBe(call.callId);
});
it("keeps calls when provider returns unknown (transient error)", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall();
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
provider.getCallStatusResult = { status: "error", isTerminal: false, isUnknown: true };
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { manager } = await initializeManager({
providerResult: { status: "error", isTerminal: false, isUnknown: true },
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(1);
});
it("skips calls older than maxDurationSeconds", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall({
startedAt: Date.now() - 600_000,
answeredAt: Date.now() - 590_000,
const { manager } = await initializeManager({
callOverrides: {
startedAt: Date.now() - 600_000,
answeredAt: Date.now() - 590_000,
},
configOverrides: { maxDurationSeconds: 300 },
});
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
maxDurationSeconds: 300,
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(0);
});
it("skips calls without providerCallId", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall({ providerCallId: undefined, state: "initiated" });
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { manager } = await initializeManager({
callOverrides: { providerCallId: undefined, state: "initiated" },
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(0);
});
it("keeps call when getCallStatus throws (verification failure)", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall();
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
provider.getCallStatus = async () => {
throw new Error("network failure");
};
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { manager } = await initializeManager({
configureProvider: (provider) => {
provider.getCallStatus = async () => {
throw new Error("network failure");
};
},
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(1);
});

View File

@ -21,6 +21,12 @@ function createContext(rawBody: string, query?: WebhookContext["query"]): Webhoo
};
}
// Assert the TwiML body wires up a media stream: stream URL, signed token
// parameter, and a <Connect> verb.
function expectStreamingTwiml(body: string) {
  const requiredFragments = [
    STREAM_URL,
    '<Parameter name="token" value="',
    "<Connect>",
  ];
  for (const fragment of requiredFragments) {
    expect(body).toContain(fragment);
  }
}
describe("TwilioProvider", () => {
it("returns streaming TwiML for outbound conversation calls before in-progress", () => {
const provider = createProvider();
@ -30,9 +36,8 @@ describe("TwilioProvider", () => {
const result = provider.parseWebhookEvent(ctx);
expect(result.providerResponseBody).toContain(STREAM_URL);
expect(result.providerResponseBody).toContain('<Parameter name="token" value="');
expect(result.providerResponseBody).toContain("<Connect>");
expect(result.providerResponseBody).toBeDefined();
expectStreamingTwiml(result.providerResponseBody ?? "");
});
it("returns empty TwiML for status callbacks", () => {
@ -55,9 +60,8 @@ describe("TwilioProvider", () => {
const result = provider.parseWebhookEvent(ctx);
expect(result.providerResponseBody).toContain(STREAM_URL);
expect(result.providerResponseBody).toContain('<Parameter name="token" value="');
expect(result.providerResponseBody).toContain("<Connect>");
expect(result.providerResponseBody).toBeDefined();
expectStreamingTwiml(result.providerResponseBody ?? "");
});
it("returns queue TwiML for second inbound call when first call is active", () => {

View File

@ -32,6 +32,41 @@ async function waitForPollingLoopStart(): Promise<void> {
await vi.waitFor(() => expect(getUpdatesMock).toHaveBeenCalledTimes(1));
}
const TEST_ACCOUNT = {
accountId: "default",
config: {},
} as unknown as ResolvedZaloAccount;
const TEST_CONFIG = {} as OpenClawConfig;
// Build a runtime stub whose log/error sinks are vitest spies, so lifecycle
// tests can assert on emitted messages.
function createLifecycleRuntime() {
  const log = vi.fn<(message: string) => void>();
  const error = vi.fn<(message: string) => void>();
  return { log, error };
}
/**
 * Kick off monitorZaloProvider against the shared test account/config with a
 * fresh AbortController and spy runtime. The monitor promise is returned
 * un-awaited so tests control when (and whether) it settles.
 * Extra options (webhook mode/secret/url) are spread over the defaults.
 */
async function startLifecycleMonitor(
  options: {
    useWebhook?: boolean;
    webhookSecret?: string;
    webhookUrl?: string;
  } = {},
) {
  const { monitorZaloProvider } = await import("./monitor.js");
  const runtime = createLifecycleRuntime();
  const abort = new AbortController();
  const run = monitorZaloProvider({
    token: "test-token",
    account: TEST_ACCOUNT,
    config: TEST_CONFIG,
    runtime,
    abortSignal: abort.signal,
    ...options,
  });
  return { abort, runtime, run };
}
describe("monitorZaloProvider lifecycle", () => {
afterEach(() => {
vi.clearAllMocks();
@ -39,26 +74,9 @@ describe("monitorZaloProvider lifecycle", () => {
});
it("stays alive in polling mode until abort", async () => {
const { monitorZaloProvider } = await import("./monitor.js");
const abort = new AbortController();
const runtime = {
log: vi.fn<(message: string) => void>(),
error: vi.fn<(message: string) => void>(),
};
const account = {
accountId: "default",
config: {},
} as unknown as ResolvedZaloAccount;
const config = {} as OpenClawConfig;
let settled = false;
const run = monitorZaloProvider({
token: "test-token",
account,
config,
runtime,
abortSignal: abort.signal,
}).then(() => {
const { abort, runtime, run } = await startLifecycleMonitor();
const monitoredRun = run.then(() => {
settled = true;
});
@ -70,7 +88,7 @@ describe("monitorZaloProvider lifecycle", () => {
expect(settled).toBe(false);
abort.abort();
await run;
await monitoredRun;
expect(settled).toBe(true);
expect(runtime.log).toHaveBeenCalledWith(
@ -84,25 +102,7 @@ describe("monitorZaloProvider lifecycle", () => {
result: { url: "https://example.com/hooks/zalo" },
});
const { monitorZaloProvider } = await import("./monitor.js");
const abort = new AbortController();
const runtime = {
log: vi.fn<(message: string) => void>(),
error: vi.fn<(message: string) => void>(),
};
const account = {
accountId: "default",
config: {},
} as unknown as ResolvedZaloAccount;
const config = {} as OpenClawConfig;
const run = monitorZaloProvider({
token: "test-token",
account,
config,
runtime,
abortSignal: abort.signal,
});
const { abort, runtime, run } = await startLifecycleMonitor();
await waitForPollingLoopStart();
@ -120,25 +120,7 @@ describe("monitorZaloProvider lifecycle", () => {
const { ZaloApiError } = await import("./api.js");
getWebhookInfoMock.mockRejectedValueOnce(new ZaloApiError("Not Found", 404, "Not Found"));
const { monitorZaloProvider } = await import("./monitor.js");
const abort = new AbortController();
const runtime = {
log: vi.fn<(message: string) => void>(),
error: vi.fn<(message: string) => void>(),
};
const account = {
accountId: "default",
config: {},
} as unknown as ResolvedZaloAccount;
const config = {} as OpenClawConfig;
const run = monitorZaloProvider({
token: "test-token",
account,
config,
runtime,
abortSignal: abort.signal,
});
const { abort, runtime, run } = await startLifecycleMonitor();
await waitForPollingLoopStart();
@ -165,29 +147,13 @@ describe("monitorZaloProvider lifecycle", () => {
}),
);
const { monitorZaloProvider } = await import("./monitor.js");
const abort = new AbortController();
const runtime = {
log: vi.fn<(message: string) => void>(),
error: vi.fn<(message: string) => void>(),
};
const account = {
accountId: "default",
config: {},
} as unknown as ResolvedZaloAccount;
const config = {} as OpenClawConfig;
let settled = false;
const run = monitorZaloProvider({
token: "test-token",
account,
config,
runtime,
abortSignal: abort.signal,
const { abort, runtime, run } = await startLifecycleMonitor({
useWebhook: true,
webhookUrl: "https://example.com/hooks/zalo",
webhookSecret: "supersecret", // pragma: allowlist secret
}).then(() => {
});
const monitoredRun = run.then(() => {
settled = true;
});
@ -202,7 +168,7 @@ describe("monitorZaloProvider lifecycle", () => {
expect(registry.httpRoutes).toHaveLength(1);
resolveDeleteWebhook?.();
await run;
await monitoredRun;
expect(settled).toBe(true);
expect(registry.httpRoutes).toHaveLength(0);

View File

@ -187,6 +187,31 @@ function installRuntime(params: {
};
}
function installGroupCommandAuthRuntime() {
return installRuntime({
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
});
}
async function processGroupControlCommand(params: {
account: ResolvedZalouserAccount;
content?: string;
commandContent?: string;
}) {
await __testing.processMessage({
message: createGroupMessage({
content: params.content ?? "/new",
commandContent: params.commandContent ?? "/new",
hasAnyMention: true,
wasExplicitlyMentioned: true,
}),
account: params.account,
config: createConfig(),
runtime: createRuntimeEnv(),
});
}
function createGroupMessage(overrides: Partial<ZaloInboundMessage> = {}): ZaloInboundMessage {
return {
threadId: "g-1",
@ -229,57 +254,152 @@ describe("zalouser monitor group mention gating", () => {
sendSeenZalouserMock.mockClear();
});
it("skips unmentioned group messages when requireMention=true", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
async function processMessageWithDefaults(params: {
message: ZaloInboundMessage;
account?: ResolvedZalouserAccount;
historyState?: {
historyLimit: number;
groupHistories: Map<
string,
Array<{ sender: string; body: string; timestamp?: number; messageId?: string }>
>;
};
}) {
await __testing.processMessage({
message: createGroupMessage(),
account: createAccount(),
message: params.message,
account: params.account ?? createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
historyState: params.historyState,
});
}
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
expect(sendTypingZalouserMock).not.toHaveBeenCalled();
});
it("fails closed when requireMention=true but mention detection is unavailable", async () => {
async function expectSkippedGroupMessage(message?: Partial<ZaloInboundMessage>) {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
await __testing.processMessage({
message: createGroupMessage({
canResolveExplicitMention: false,
hasAnyMention: false,
wasExplicitlyMentioned: false,
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
await processMessageWithDefaults({
message: createGroupMessage(message),
});
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
expect(sendTypingZalouserMock).not.toHaveBeenCalled();
});
}
it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => {
async function expectGroupCommandAuthorizers(params: {
accountConfig: ResolvedZalouserAccount["config"];
expectedAuthorizers: Array<{ configured: boolean; allowed: boolean }>;
}) {
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
installGroupCommandAuthRuntime();
await processGroupControlCommand({
account: {
...createAccount(),
config: params.accountConfig,
},
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
expect(authCall?.authorizers).toEqual(params.expectedAuthorizers);
}
async function processOpenDmMessage(params?: {
message?: Partial<ZaloInboundMessage>;
readSessionUpdatedAt?: (input?: {
storePath: string;
sessionKey: string;
}) => number | undefined;
}) {
const runtime = installRuntime({
commandAuthorized: false,
});
if (params?.readSessionUpdatedAt) {
runtime.readSessionUpdatedAt.mockImplementation(params.readSessionUpdatedAt);
}
const account = createAccount();
await processMessageWithDefaults({
message: createDmMessage(params?.message),
account: {
...account,
config: {
...account.config,
dmPolicy: "open",
},
},
});
return runtime;
}
async function expectDangerousNameMatching(params: {
dangerouslyAllowNameMatching?: boolean;
expectedDispatches: number;
}) {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
await __testing.processMessage({
await processMessageWithDefaults({
message: createGroupMessage({
threadId: "g-attacker-001",
groupName: "Trusted Team",
senderId: "666",
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
account: {
...createAccount(),
config: {
...createAccount().config,
...(params.dangerouslyAllowNameMatching ? { dangerouslyAllowNameMatching: true } : {}),
groupPolicy: "allowlist",
groupAllowFrom: ["*"],
groups: {
"group:g-trusted-001": { allow: true },
"Trusted Team": { allow: true },
},
},
},
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(
params.expectedDispatches,
);
return dispatchReplyWithBufferedBlockDispatcher;
}
async function dispatchGroupMessage(params: {
commandAuthorized: boolean;
message: Partial<ZaloInboundMessage>;
}) {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: params.commandAuthorized,
});
await processMessageWithDefaults({
message: createGroupMessage(params.message),
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
return dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
}
it("skips unmentioned group messages when requireMention=true", async () => {
await expectSkippedGroupMessage();
});
it("fails closed when requireMention=true but mention detection is unavailable", async () => {
await expectSkippedGroupMessage({
canResolveExplicitMention: false,
hasAnyMention: false,
wasExplicitlyMentioned: false,
});
});
it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => {
const callArg = await dispatchGroupMessage({
commandAuthorized: false,
message: {
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
},
});
expect(callArg?.ctx?.WasMentioned).toBe(true);
expect(callArg?.ctx?.To).toBe("zalouser:group:g-1");
expect(callArg?.ctx?.OriginatingTo).toBe("zalouser:group:g-1");
@ -290,22 +410,14 @@ describe("zalouser monitor group mention gating", () => {
});
it("allows authorized control commands to bypass mention gating", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
const callArg = await dispatchGroupMessage({
commandAuthorized: true,
});
await __testing.processMessage({
message: createGroupMessage({
message: {
content: "/status",
hasAnyMention: false,
wasExplicitlyMentioned: false,
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
},
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
expect(callArg?.ctx?.WasMentioned).toBe(true);
});
@ -346,57 +458,30 @@ describe("zalouser monitor group mention gating", () => {
});
it("uses commandContent for mention-prefixed control commands", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
const callArg = await dispatchGroupMessage({
commandAuthorized: true,
});
await __testing.processMessage({
message: createGroupMessage({
message: {
content: "@Bot /new",
commandContent: "/new",
hasAnyMention: true,
wasExplicitlyMentioned: true,
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
},
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
expect(callArg?.ctx?.CommandBody).toBe("/new");
expect(callArg?.ctx?.BodyForCommands).toBe("/new");
});
it("allows group control commands when only allowFrom is configured", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
installRuntime({
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
});
await __testing.processMessage({
message: createGroupMessage({
content: "/new",
commandContent: "/new",
hasAnyMention: true,
wasExplicitlyMentioned: true,
}),
account: {
...createAccount(),
config: {
...createAccount().config,
allowFrom: ["123"],
},
await expectGroupCommandAuthorizers({
accountConfig: {
...createAccount().config,
allowFrom: ["123"],
},
config: createConfig(),
runtime: createRuntimeEnv(),
expectedAuthorizers: [
{ configured: true, allowed: true },
{ configured: true, allowed: true },
],
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
expect(authCall?.authorizers).toEqual([
{ configured: true, allowed: true },
{ configured: true, allowed: true },
]);
});
it("blocks group messages when sender is not in groupAllowFrom/allowFrom", async () => {
@ -425,123 +510,35 @@ describe("zalouser monitor group mention gating", () => {
});
it("does not accept a different group id by matching only the mutable group name by default", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
await __testing.processMessage({
message: createGroupMessage({
threadId: "g-attacker-001",
groupName: "Trusted Team",
senderId: "666",
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
}),
account: {
...createAccount(),
config: {
...createAccount().config,
groupPolicy: "allowlist",
groupAllowFrom: ["*"],
groups: {
"group:g-trusted-001": { allow: true },
"Trusted Team": { allow: true },
},
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
});
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
await expectDangerousNameMatching({ expectedDispatches: 0 });
});
it("accepts mutable group-name matches only when dangerouslyAllowNameMatching is enabled", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
const dispatchReplyWithBufferedBlockDispatcher = await expectDangerousNameMatching({
dangerouslyAllowNameMatching: true,
expectedDispatches: 1,
});
await __testing.processMessage({
message: createGroupMessage({
threadId: "g-attacker-001",
groupName: "Trusted Team",
senderId: "666",
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
}),
account: {
...createAccount(),
config: {
...createAccount().config,
dangerouslyAllowNameMatching: true,
groupPolicy: "allowlist",
groupAllowFrom: ["*"],
groups: {
"group:g-trusted-001": { allow: true },
"Trusted Team": { allow: true },
},
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
expect(callArg?.ctx?.To).toBe("zalouser:group:g-attacker-001");
});
it("allows group control commands when sender is in groupAllowFrom", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
installRuntime({
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
});
await __testing.processMessage({
message: createGroupMessage({
content: "/new",
commandContent: "/new",
hasAnyMention: true,
wasExplicitlyMentioned: true,
}),
account: {
...createAccount(),
config: {
...createAccount().config,
allowFrom: ["999"],
groupAllowFrom: ["123"],
},
await expectGroupCommandAuthorizers({
accountConfig: {
...createAccount().config,
allowFrom: ["999"],
groupAllowFrom: ["123"],
},
config: createConfig(),
runtime: createRuntimeEnv(),
expectedAuthorizers: [
{ configured: true, allowed: false },
{ configured: true, allowed: true },
],
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
expect(authCall?.authorizers).toEqual([
{ configured: true, allowed: false },
{ configured: true, allowed: true },
]);
});
it("routes DM messages with direct peer kind", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, resolveAgentRoute, buildAgentSessionKey } =
installRuntime({
commandAuthorized: false,
});
const account = createAccount();
await __testing.processMessage({
message: createDmMessage(),
account: {
...account,
config: {
...account.config,
dmPolicy: "open",
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
});
await processOpenDmMessage();
expect(resolveAgentRoute).toHaveBeenCalledWith(
expect.objectContaining({
@ -559,24 +556,9 @@ describe("zalouser monitor group mention gating", () => {
});
it("reuses the legacy DM session key when only the old group-shaped session exists", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, readSessionUpdatedAt } = installRuntime({
commandAuthorized: false,
});
readSessionUpdatedAt.mockImplementation((input?: { storePath: string; sessionKey: string }) =>
input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined,
);
const account = createAccount();
await __testing.processMessage({
message: createDmMessage(),
account: {
...account,
config: {
...account.config,
dmPolicy: "open",
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
const { dispatchReplyWithBufferedBlockDispatcher } = await processOpenDmMessage({
readSessionUpdatedAt: (input?: { storePath: string; sessionKey: string }) =>
input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined,
});
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];

View File

@ -353,6 +353,7 @@
"@mariozechner/pi-ai": "0.57.1",
"@mariozechner/pi-coding-agent": "0.57.1",
"@mariozechner/pi-tui": "0.57.1",
"@modelcontextprotocol/sdk": "1.27.1",
"@mozilla/readability": "^0.6.0",
"@sinclair/typebox": "0.34.48",
"@slack/bolt": "^4.6.0",

451
pnpm-lock.yaml generated
View File

@ -60,16 +60,19 @@ importers:
version: 1.2.0-beta.3
'@mariozechner/pi-agent-core':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-coding-agent':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui':
specifier: 0.57.1
version: 0.57.1
'@modelcontextprotocol/sdk':
specifier: 1.27.1
version: 1.27.1(zod@4.3.6)
'@mozilla/readability':
specifier: ^0.6.0
version: 0.6.0
@ -344,9 +347,10 @@ importers:
google-auth-library:
specifier: ^10.6.1
version: 10.6.1
devDependencies:
openclaw:
specifier: '>=2026.3.11'
version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
specifier: workspace:*
version: link:../..
extensions/imessage: {}
@ -377,7 +381,7 @@ importers:
dependencies:
'@mariozechner/pi-agent-core':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@matrix-org/matrix-sdk-crypto-nodejs':
specifier: ^0.4.0
version: 0.4.0
@ -404,10 +408,10 @@ importers:
version: 4.3.6
extensions/memory-core:
dependencies:
devDependencies:
openclaw:
specifier: '>=2026.3.11'
version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
specifier: workspace:*
version: link:../..
extensions/memory-lancedb:
dependencies:
@ -651,10 +655,6 @@ packages:
resolution: {integrity: sha512-t8cl+bPLlHZQD2Sw1a4hSLUybqJZU71+m8znkyeU8CHntFqEp2mMbuLKdHKaAYQ1fAApXMsvzenCAkDzNeeJlw==}
engines: {node: '>=20.0.0'}
'@aws-sdk/client-bedrock@3.1007.0':
resolution: {integrity: sha512-49hH8o6ALKkCiBUgg20HkwxNamP1yYA/n8Si73Z438EqhZGpCfScP3FfxVhrfD5o+4bV4Whi9BTzPKCa/PfUww==}
engines: {node: '>=20.0.0'}
'@aws-sdk/client-bedrock@3.1008.0':
resolution: {integrity: sha512-mzxO/DplpZZT7AIZUCG7Q78OlaeHeDybYz+ZlWZPaXFjGDJwUv1E3SKskmaaQvTsMeieie0WX7gzueYrCx4YfQ==}
engines: {node: '>=20.0.0'}
@ -711,10 +711,6 @@ packages:
resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-ini@3.972.18':
resolution: {integrity: sha512-vthIAXJISZnj2576HeyLBj4WTeX+I7PwWeRkbOa0mVX39K13SCGxCgOFuKj2ytm9qTlLOmXe4cdEnroteFtJfw==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-ini@3.972.19':
resolution: {integrity: sha512-pVJVjWqVrPqjpFq7o0mCmeZu1Y0c94OCHSYgivdCD2wfmYVtBbwQErakruhgOD8pcMcx9SCqRw1pzHKR7OGBcA==}
engines: {node: '>=20.0.0'}
@ -727,10 +723,6 @@ packages:
resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-login@3.972.18':
resolution: {integrity: sha512-kINzc5BBxdYBkPZ0/i1AMPMOk5b5QaFNbYMElVw5QTX13AKj6jcxnv/YNl9oW9mg+Y08ti19hh01HhyEAxsSJQ==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-login@3.972.19':
resolution: {integrity: sha512-jOXdZ1o+CywQKr6gyxgxuUmnGwTTnY2Kxs1PM7fI6AYtDWDnmW/yKXayNqkF8KjP1unflqMWKVbVt5VgmE3L0g==}
engines: {node: '>=20.0.0'}
@ -743,10 +735,6 @@ packages:
resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-node@3.972.19':
resolution: {integrity: sha512-yDWQ9dFTr+IMxwanFe7+tbN5++q8psZBjlUwOiCXn1EzANoBgtqBwcpYcHaMGtn0Wlfj4NuXdf2JaEx1lz5RaQ==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-node@3.972.20':
resolution: {integrity: sha512-0xHca2BnPY0kzjDYPH7vk8YbfdBPpWVS67rtqQMalYDQUCBYS37cZ55K6TuFxCoIyNZgSCFrVKr9PXC5BVvQQw==}
engines: {node: '>=20.0.0'}
@ -771,10 +759,6 @@ packages:
resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-sso@3.972.18':
resolution: {integrity: sha512-YHYEfj5S2aqInRt5ub8nDOX8vAxgMvd84wm2Y3WVNfFa/53vOv9T7WOAqXI25qjj3uEcV46xxfqdDQk04h5XQA==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-sso@3.972.19':
resolution: {integrity: sha512-kVjQsEU3b///q7EZGrUzol9wzwJFKbEzqJKSq82A9ShrUTEO7FNylTtby3sPV19ndADZh1H3FB3+5ZrvKtEEeg==}
engines: {node: '>=20.0.0'}
@ -787,10 +771,6 @@ packages:
resolution: {integrity: sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-web-identity@3.972.18':
resolution: {integrity: sha512-OqlEQpJ+J3T5B96qtC1zLLwkBloechP+fezKbCH0sbd2cCc0Ra55XpxWpk/hRj69xAOYtHvoC4orx6eTa4zU7g==}
engines: {node: '>=20.0.0'}
'@aws-sdk/credential-provider-web-identity@3.972.19':
resolution: {integrity: sha512-BV1BlTFdG4w4tAihxN7iXDBoNcNewXD4q8uZlNQiUrnqxwGWUhKHODIQVSPlQGxXClEj+63m+cqZskw+ESmeZg==}
engines: {node: '>=20.0.0'}
@ -875,10 +855,6 @@ packages:
resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==}
engines: {node: '>=20.0.0'}
'@aws-sdk/nested-clients@3.996.8':
resolution: {integrity: sha512-6HlLm8ciMW8VzfB80kfIx16PBA9lOa9Dl+dmCBi78JDhvGlx3I7Rorwi5PpVRkL31RprXnYna3yBf6UKkD/PqA==}
engines: {node: '>=20.0.0'}
'@aws-sdk/nested-clients@3.996.9':
resolution: {integrity: sha512-+RpVtpmQbbtzFOKhMlsRcXM/3f1Z49qTOHaA8gEpHOYruERmog6f2AUtf/oTRLCWjR9H2b3roqryV/hI7QMW8w==}
engines: {node: '>=20.0.0'}
@ -903,14 +879,6 @@ packages:
resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==}
engines: {node: '>=20.0.0'}
'@aws-sdk/token-providers@3.1005.0':
resolution: {integrity: sha512-vMxd+ivKqSxU9bHx5vmAlFKDAkjGotFU56IOkDa5DaTu1WWwbcse0yFHEm9I537oVvodaiwMl3VBwgHfzQ2rvw==}
engines: {node: '>=20.0.0'}
'@aws-sdk/token-providers@3.1007.0':
resolution: {integrity: sha512-kKvVyr53vvVc5k6RbvI6jhafxufxO2SkEw8QeEzJqwOXH/IMY7Cm0IyhnBGdqj80iiIIiIM2jGe7Fn3TIdwdrw==}
engines: {node: '>=20.0.0'}
'@aws-sdk/token-providers@3.1008.0':
resolution: {integrity: sha512-TulwlHQBWcJs668kNUDMZHN51DeLrDsYT59Ux4a/nbvr025gM6HjKJJ3LvnZccam7OS/ZKUVkWomCneRQKJbBg==}
engines: {node: '>=20.0.0'}
@ -979,15 +947,6 @@ packages:
aws-crt:
optional: true
'@aws-sdk/util-user-agent-node@3.973.5':
resolution: {integrity: sha512-Dyy38O4GeMk7UQ48RupfHif//gqnOPbq/zlvRssc11E2mClT+aUfc3VS2yD8oLtzqO3RsqQ9I3gOBB4/+HjPOw==}
engines: {node: '>=20.0.0'}
peerDependencies:
aws-crt: '>=1.0.0'
peerDependenciesMeta:
aws-crt:
optional: true
'@aws-sdk/util-user-agent-node@3.973.6':
resolution: {integrity: sha512-iF7G0prk7AvmOK64FcLvc/fW+Ty1H+vttajL7PvJFReU8urMxfYmynTTuFKDTA76Wgpq3FzTPKwabMQIXQHiXQ==}
engines: {node: '>=20.0.0'}
@ -1828,6 +1787,16 @@ packages:
'@mistralai/mistralai@1.14.1':
resolution: {integrity: sha512-IiLmmZFCCTReQgPAT33r7KQ1nYo5JPdvGkrkZqA8qQ2qB1GHgs5LoP5K2ICyrjnpw2n8oSxMM/VP+liiKcGNlQ==}
'@modelcontextprotocol/sdk@1.27.1':
resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==}
engines: {node: '>=18'}
peerDependencies:
'@cfworker/json-schema': ^4.1.1
zod: ^3.25 || ^4.0
peerDependenciesMeta:
'@cfworker/json-schema':
optional: true
'@mozilla/readability@0.6.0':
resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==}
engines: {node: '>=14.0.0'}
@ -4271,6 +4240,10 @@ packages:
core-util-is@1.0.3:
resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==}
cors@2.8.6:
resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==}
engines: {node: '>= 0.10'}
croner@10.0.1:
resolution: {integrity: sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==}
engines: {node: '>=18.0'}
@ -4550,6 +4523,14 @@ packages:
events-universal@1.0.1:
resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==}
eventsource-parser@3.0.6:
resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
engines: {node: '>=18.0.0'}
eventsource@3.0.7:
resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==}
engines: {node: '>=18.0.0'}
execa@4.1.0:
resolution: {integrity: sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==}
engines: {node: '>=10'}
@ -4561,6 +4542,12 @@ packages:
exponential-backoff@3.1.3:
resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==}
express-rate-limit@8.3.1:
resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==}
engines: {node: '>= 16'}
peerDependencies:
express: '>= 4.11'
express@4.22.1:
resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==}
engines: {node: '>= 0.10.0'}
@ -5058,6 +5045,9 @@ packages:
jose@4.15.9:
resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==}
jose@6.2.1:
resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==}
js-stringify@1.0.2:
resolution: {integrity: sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==}
@ -5102,6 +5092,9 @@ packages:
json-schema-traverse@1.0.0:
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
json-schema-typed@8.0.2:
resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==}
json-schema@0.4.0:
resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==}
@ -5689,14 +5682,6 @@ packages:
zod:
optional: true
openclaw@2026.3.11:
resolution: {integrity: sha512-bxwiBmHPakwfpY5tqC9lrV5TCu5PKf0c1bHNc3nhrb+pqKcPEWV4zOjDVFLQUHr98ihgWA+3pacy4b3LQ8wduQ==}
engines: {node: '>=22.12.0'}
hasBin: true
peerDependencies:
'@napi-rs/canvas': ^0.1.89
node-llama-cpp: 3.16.2
opus-decoder@0.7.11:
resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==}
@ -5870,6 +5855,10 @@ packages:
resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==}
hasBin: true
pkce-challenge@5.0.1:
resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==}
engines: {node: '>=16.20.0'}
playwright-core@1.58.2:
resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==}
engines: {node: '>=18'}
@ -6667,10 +6656,6 @@ packages:
undici-types@7.18.2:
resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==}
undici@7.22.0:
resolution: {integrity: sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg==}
engines: {node: '>=20.18.1'}
undici@7.24.0:
resolution: {integrity: sha512-jxytwMHhsbdpBXxLAcuu0fzlQeXCNnWdDyRHpvWsUl8vd98UwYdl9YTyn8/HcpcJPC3pwUveefsa3zTxyD/ERg==}
engines: {node: '>=20.18.1'}
@ -7120,51 +7105,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/client-bedrock@3.1007.0':
dependencies:
'@aws-crypto/sha256-browser': 5.2.0
'@aws-crypto/sha256-js': 5.2.0
'@aws-sdk/core': 3.973.19
'@aws-sdk/credential-provider-node': 3.972.19
'@aws-sdk/middleware-host-header': 3.972.7
'@aws-sdk/middleware-logger': 3.972.7
'@aws-sdk/middleware-recursion-detection': 3.972.7
'@aws-sdk/middleware-user-agent': 3.972.20
'@aws-sdk/region-config-resolver': 3.972.7
'@aws-sdk/token-providers': 3.1007.0
'@aws-sdk/types': 3.973.5
'@aws-sdk/util-endpoints': 3.996.4
'@aws-sdk/util-user-agent-browser': 3.972.7
'@aws-sdk/util-user-agent-node': 3.973.5
'@smithy/config-resolver': 4.4.10
'@smithy/core': 3.23.9
'@smithy/fetch-http-handler': 5.3.13
'@smithy/hash-node': 4.2.11
'@smithy/invalid-dependency': 4.2.11
'@smithy/middleware-content-length': 4.2.11
'@smithy/middleware-endpoint': 4.4.23
'@smithy/middleware-retry': 4.4.40
'@smithy/middleware-serde': 4.2.12
'@smithy/middleware-stack': 4.2.11
'@smithy/node-config-provider': 4.3.11
'@smithy/node-http-handler': 4.4.14
'@smithy/protocol-http': 5.3.11
'@smithy/smithy-client': 4.12.3
'@smithy/types': 4.13.0
'@smithy/url-parser': 4.2.11
'@smithy/util-base64': 4.3.2
'@smithy/util-body-length-browser': 4.2.2
'@smithy/util-body-length-node': 4.2.3
'@smithy/util-defaults-mode-browser': 4.3.39
'@smithy/util-defaults-mode-node': 4.2.42
'@smithy/util-endpoints': 3.3.2
'@smithy/util-middleware': 4.2.11
'@smithy/util-retry': 4.2.11
'@smithy/util-utf8': 4.2.2
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/client-bedrock@3.1008.0':
dependencies:
'@aws-crypto/sha256-browser': 5.2.0
@ -7424,25 +7364,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-ini@3.972.18':
dependencies:
'@aws-sdk/core': 3.973.19
'@aws-sdk/credential-provider-env': 3.972.17
'@aws-sdk/credential-provider-http': 3.972.19
'@aws-sdk/credential-provider-login': 3.972.18
'@aws-sdk/credential-provider-process': 3.972.17
'@aws-sdk/credential-provider-sso': 3.972.18
'@aws-sdk/credential-provider-web-identity': 3.972.18
'@aws-sdk/nested-clients': 3.996.8
'@aws-sdk/types': 3.973.5
'@smithy/credential-provider-imds': 4.2.11
'@smithy/property-provider': 4.2.11
'@smithy/shared-ini-file-loader': 4.4.6
'@smithy/types': 4.13.0
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-ini@3.972.19':
dependencies:
'@aws-sdk/core': 3.973.19
@ -7488,19 +7409,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-login@3.972.18':
dependencies:
'@aws-sdk/core': 3.973.19
'@aws-sdk/nested-clients': 3.996.8
'@aws-sdk/types': 3.973.5
'@smithy/property-provider': 4.2.11
'@smithy/protocol-http': 5.3.11
'@smithy/shared-ini-file-loader': 4.4.6
'@smithy/types': 4.13.0
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-login@3.972.19':
dependencies:
'@aws-sdk/core': 3.973.19
@ -7548,23 +7456,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-node@3.972.19':
dependencies:
'@aws-sdk/credential-provider-env': 3.972.17
'@aws-sdk/credential-provider-http': 3.972.19
'@aws-sdk/credential-provider-ini': 3.972.18
'@aws-sdk/credential-provider-process': 3.972.17
'@aws-sdk/credential-provider-sso': 3.972.18
'@aws-sdk/credential-provider-web-identity': 3.972.18
'@aws-sdk/types': 3.973.5
'@smithy/credential-provider-imds': 4.2.11
'@smithy/property-provider': 4.2.11
'@smithy/shared-ini-file-loader': 4.4.6
'@smithy/types': 4.13.0
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-node@3.972.20':
dependencies:
'@aws-sdk/credential-provider-env': 3.972.17
@ -7635,19 +7526,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-sso@3.972.18':
dependencies:
'@aws-sdk/core': 3.973.19
'@aws-sdk/nested-clients': 3.996.8
'@aws-sdk/token-providers': 3.1005.0
'@aws-sdk/types': 3.973.5
'@smithy/property-provider': 4.2.11
'@smithy/shared-ini-file-loader': 4.4.6
'@smithy/types': 4.13.0
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-sso@3.972.19':
dependencies:
'@aws-sdk/core': 3.973.19
@ -7685,18 +7563,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-web-identity@3.972.18':
dependencies:
'@aws-sdk/core': 3.973.19
'@aws-sdk/nested-clients': 3.996.8
'@aws-sdk/types': 3.973.5
'@smithy/property-provider': 4.2.11
'@smithy/shared-ini-file-loader': 4.4.6
'@smithy/types': 4.13.0
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/credential-provider-web-identity@3.972.19':
dependencies:
'@aws-sdk/core': 3.973.19
@ -7961,49 +7827,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/nested-clients@3.996.8':
dependencies:
'@aws-crypto/sha256-browser': 5.2.0
'@aws-crypto/sha256-js': 5.2.0
'@aws-sdk/core': 3.973.19
'@aws-sdk/middleware-host-header': 3.972.7
'@aws-sdk/middleware-logger': 3.972.7
'@aws-sdk/middleware-recursion-detection': 3.972.7
'@aws-sdk/middleware-user-agent': 3.972.20
'@aws-sdk/region-config-resolver': 3.972.7
'@aws-sdk/types': 3.973.5
'@aws-sdk/util-endpoints': 3.996.4
'@aws-sdk/util-user-agent-browser': 3.972.7
'@aws-sdk/util-user-agent-node': 3.973.5
'@smithy/config-resolver': 4.4.10
'@smithy/core': 3.23.9
'@smithy/fetch-http-handler': 5.3.13
'@smithy/hash-node': 4.2.11
'@smithy/invalid-dependency': 4.2.11
'@smithy/middleware-content-length': 4.2.11
'@smithy/middleware-endpoint': 4.4.23
'@smithy/middleware-retry': 4.4.40
'@smithy/middleware-serde': 4.2.12
'@smithy/middleware-stack': 4.2.11
'@smithy/node-config-provider': 4.3.11
'@smithy/node-http-handler': 4.4.14
'@smithy/protocol-http': 5.3.11
'@smithy/smithy-client': 4.12.3
'@smithy/types': 4.13.0
'@smithy/url-parser': 4.2.11
'@smithy/util-base64': 4.3.2
'@smithy/util-body-length-browser': 4.2.2
'@smithy/util-body-length-node': 4.2.3
'@smithy/util-defaults-mode-browser': 4.3.39
'@smithy/util-defaults-mode-node': 4.2.42
'@smithy/util-endpoints': 3.3.2
'@smithy/util-middleware': 4.2.11
'@smithy/util-retry': 4.2.11
'@smithy/util-utf8': 4.2.2
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/nested-clients@3.996.9':
dependencies:
'@aws-crypto/sha256-browser': 5.2.0
@ -8095,30 +7918,6 @@ snapshots:
transitivePeerDependencies:
- aws-crt
'@aws-sdk/token-providers@3.1005.0':
dependencies:
'@aws-sdk/core': 3.973.19
'@aws-sdk/nested-clients': 3.996.8
'@aws-sdk/types': 3.973.5
'@smithy/property-provider': 4.2.11
'@smithy/shared-ini-file-loader': 4.4.6
'@smithy/types': 4.13.0
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/token-providers@3.1007.0':
dependencies:
'@aws-sdk/core': 3.973.19
'@aws-sdk/nested-clients': 3.996.8
'@aws-sdk/types': 3.973.5
'@smithy/property-provider': 4.2.11
'@smithy/shared-ini-file-loader': 4.4.6
'@smithy/types': 4.13.0
tslib: 2.8.1
transitivePeerDependencies:
- aws-crt
'@aws-sdk/token-providers@3.1008.0':
dependencies:
'@aws-sdk/core': 3.973.19
@ -8225,14 +8024,6 @@ snapshots:
'@smithy/types': 4.13.0
tslib: 2.8.1
'@aws-sdk/util-user-agent-node@3.973.5':
dependencies:
'@aws-sdk/middleware-user-agent': 3.972.20
'@aws-sdk/types': 3.973.5
'@smithy/node-config-provider': 4.3.11
'@smithy/types': 4.13.0
tslib: 2.8.1
'@aws-sdk/util-user-agent-node@3.973.6':
dependencies:
'@aws-sdk/middleware-user-agent': 3.972.20
@ -8645,12 +8436,14 @@ snapshots:
optionalDependencies:
'@noble/hashes': 2.0.1
'@google/genai@1.44.0':
'@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))':
dependencies:
google-auth-library: 10.6.1
p-retry: 4.6.2
protobufjs: 7.5.4
ws: 8.19.0
optionalDependencies:
'@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6)
transitivePeerDependencies:
- bufferutil
- supports-color
@ -8698,7 +8491,6 @@ snapshots:
'@hono/node-server@1.19.10(hono@4.12.7)':
dependencies:
hono: 4.12.7
optional: true
'@huggingface/jinja@0.5.5': {}
@ -9025,9 +8817,9 @@ snapshots:
std-env: 3.10.0
yoctocolors: 2.1.2
'@mariozechner/pi-agent-core@0.57.1(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-agent-core@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
transitivePeerDependencies:
- '@modelcontextprotocol/sdk'
- aws-crt
@ -9037,11 +8829,11 @@ snapshots:
- ws
- zod
'@mariozechner/pi-ai@0.57.1(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-ai@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@anthropic-ai/sdk': 0.73.0(zod@4.3.6)
'@aws-sdk/client-bedrock-runtime': 3.1004.0
'@google/genai': 1.44.0
'@google/genai': 1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))
'@mistralai/mistralai': 1.14.1
'@sinclair/typebox': 0.34.48
ajv: 8.18.0
@ -9061,11 +8853,11 @@ snapshots:
- ws
- zod
'@mariozechner/pi-coding-agent@0.57.1(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-coding-agent@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@mariozechner/jiti': 2.6.5
'@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-agent-core': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui': 0.57.1
'@silvia-odwyer/photon-node': 0.3.4
chalk: 5.6.2
@ -9141,6 +8933,28 @@ snapshots:
- bufferutil
- utf-8-validate
'@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)':
dependencies:
'@hono/node-server': 1.19.10(hono@4.12.7)
ajv: 8.18.0
ajv-formats: 3.0.1(ajv@8.18.0)
content-type: 1.0.5
cors: 2.8.6
cross-spawn: 7.0.6
eventsource: 3.0.7
eventsource-parser: 3.0.6
express: 5.2.1
express-rate-limit: 8.3.1(express@5.2.1)
hono: 4.12.7
jose: 6.2.1
json-schema-typed: 8.0.2
pkce-challenge: 5.0.1
raw-body: 3.0.2
zod: 4.3.6
zod-to-json-schema: 3.25.1(zod@4.3.6)
transitivePeerDependencies:
- supports-color
'@mozilla/readability@0.6.0': {}
'@napi-rs/canvas-android-arm64@0.1.95':
@ -11916,6 +11730,11 @@ snapshots:
core-util-is@1.0.3: {}
cors@2.8.6:
dependencies:
object-assign: 4.1.1
vary: 1.1.2
croner@10.0.1: {}
cross-spawn@7.0.6:
@ -12167,6 +11986,12 @@ snapshots:
transitivePeerDependencies:
- bare-abort-controller
eventsource-parser@3.0.6: {}
eventsource@3.0.7:
dependencies:
eventsource-parser: 3.0.6
execa@4.1.0:
dependencies:
cross-spawn: 7.0.6
@ -12183,6 +12008,11 @@ snapshots:
exponential-backoff@3.1.3: {}
express-rate-limit@8.3.1(express@5.2.1):
dependencies:
express: 5.2.1
ip-address: 10.1.0
express@4.22.1:
dependencies:
accepts: 1.3.8
@ -12826,6 +12656,8 @@ snapshots:
jose@4.15.9: {}
jose@6.2.1: {}
js-stringify@1.0.2: {}
js-tokens@10.0.0: {}
@ -12893,6 +12725,8 @@ snapshots:
json-schema-traverse@1.0.0: {}
json-schema-typed@8.0.2: {}
json-schema@0.4.0: {}
json-stringify-safe@5.0.1: {}
@ -13497,81 +13331,6 @@ snapshots:
ws: 8.19.0
zod: 4.3.6
openclaw@2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)):
dependencies:
'@agentclientprotocol/sdk': 0.16.1(zod@4.3.6)
'@aws-sdk/client-bedrock': 3.1007.0
'@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1)
'@clack/prompts': 1.1.0
'@discordjs/voice': 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1)
'@grammyjs/runner': 2.0.3(grammy@1.41.1)
'@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.1)
'@homebridge/ciao': 1.3.5
'@larksuiteoapi/node-sdk': 1.59.0
'@line/bot-sdk': 10.6.0
'@lydell/node-pty': 1.2.0-beta.3
'@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui': 0.57.1
'@mozilla/readability': 0.6.0
'@napi-rs/canvas': 0.1.95
'@sinclair/typebox': 0.34.48
'@slack/bolt': 4.6.0(@types/express@5.0.6)
'@slack/web-api': 7.14.1
'@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5)
ajv: 8.18.0
chalk: 5.6.2
chokidar: 5.0.0
cli-highlight: 2.1.11
commander: 14.0.3
croner: 10.0.1
discord-api-types: 0.38.42
dotenv: 17.3.1
express: 5.2.1
file-type: 21.3.1
grammy: 1.41.1
hono: 4.12.7
https-proxy-agent: 8.0.0
ipaddr.js: 2.3.0
jiti: 2.6.1
json5: 2.2.3
jszip: 3.10.1
linkedom: 0.18.12
long: 5.3.2
markdown-it: 14.1.1
node-edge-tts: 1.2.10
node-llama-cpp: 3.16.2(typescript@5.9.3)
opusscript: 0.1.1
osc-progress: 0.3.0
pdfjs-dist: 5.5.207
playwright-core: 1.58.2
qrcode-terminal: 0.12.0
sharp: 0.34.5
sqlite-vec: 0.1.7-alpha.2
tar: 7.5.11
tslog: 4.10.2
undici: 7.22.0
ws: 8.19.0
yaml: 2.8.2
zod: 4.3.6
transitivePeerDependencies:
- '@discordjs/opus'
- '@modelcontextprotocol/sdk'
- '@types/express'
- audio-decode
- aws-crt
- bufferutil
- canvas
- debug
- encoding
- ffmpeg-static
- jimp
- link-preview-js
- node-opus
- supports-color
- utf-8-validate
opus-decoder@0.7.11:
dependencies:
'@wasm-audio-decoders/common': 9.0.7
@ -13784,6 +13543,8 @@ snapshots:
sonic-boom: 4.2.1
thread-stream: 3.1.0
pkce-challenge@5.0.1: {}
playwright-core@1.58.2: {}
playwright@1.58.2:
@ -14725,8 +14486,6 @@ snapshots:
undici-types@7.18.2: {}
undici@7.22.0: {}
undici@7.24.0: {}
unist-util-is@6.0.1:

View File

@ -5,6 +5,7 @@ import { appendFileSync } from "node:fs";
const DOCS_PATH_RE = /^(docs\/|.*\.mdx?$)/;
const SKILLS_PYTHON_SCOPE_RE = /^skills\//;
const CI_WORKFLOW_SCOPE_RE = /^\.github\/workflows\/ci\.yml$/;
const MACOS_PROTOCOL_GEN_RE =
/^(apps\/macos\/Sources\/OpenClawProtocol\/|apps\/shared\/OpenClawKit\/Sources\/OpenClawProtocol\/)/;
const MACOS_NATIVE_RE = /^(apps\/macos\/|apps\/ios\/|apps\/shared\/|Swabble\/)/;
@ -55,6 +56,12 @@ export function detectChangedScope(changedPaths) {
runSkillsPython = true;
}
if (CI_WORKFLOW_SCOPE_RE.test(path)) {
runMacos = true;
runAndroid = true;
runSkillsPython = true;
}
if (!MACOS_PROTOCOL_GEN_RE.test(path) && MACOS_NATIVE_RE.test(path)) {
runMacos = true;
}

View File

@ -104,11 +104,11 @@ const hostMemoryGiB = Math.floor(os.totalmem() / 1024 ** 3);
const highMemLocalHost = !isCI && hostMemoryGiB >= 96;
const lowMemLocalHost = !isCI && hostMemoryGiB < 64;
const nodeMajor = Number.parseInt(process.versions.node.split(".")[0] ?? "", 10);
// vmForks is a big win for transform/import heavy suites, but Node 24+
// regressed with Vitest's vm runtime in this repo, and low-memory local hosts
// are more likely to hit per-worker V8 heap ceilings. Keep it opt-out via
// OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor < 24 : true;
// vmForks is a big win for transform/import heavy suites. Node 24 is stable again
// for the default unit-fast lane after moving the known flaky files to fork-only
// isolation, but Node 25+ still falls back to process forks until re-validated.
// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor <= 24 : true;
const useVmForks =
process.env.OPENCLAW_TEST_VM_FORKS === "1" ||
(process.env.OPENCLAW_TEST_VM_FORKS !== "0" && !isWindows && supportsVmForks && !lowMemLocalHost);

View File

@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js";
import { AcpGatewayAgent } from "./translator.js";
import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js";
// Shared fixtures for the ACP prompt tests below.
const TEST_SESSION_ID = "session-1";
const TEST_SESSION_KEY = "agent:main:main";
// Minimal prompt payload; cast through unknown because the tests only fill
// the fields the agent's prompt path reads, not the full PromptRequest shape.
const TEST_PROMPT = {
  sessionId: TEST_SESSION_ID,
  prompt: [{ type: "text", text: "hello" }],
  _meta: {},
} as unknown as PromptRequest;
describe("acp prompt cwd prefix", () => {
// Builds a gateway request spy that lets every RPC through except
// "chat.send", which aborts the prompt flow with a sentinel error.
const createStopAfterSendSpy = () => {
  return vi.fn(async (method: string) => {
    if (method !== "chat.send") {
      return {};
    }
    throw new Error("stop-after-send");
  });
};
/**
 * Drives AcpGatewayAgent.prompt against an in-memory session and returns the
 * gateway request spy so callers can inspect the "chat.send" payload. The spy
 * throws on "chat.send", so the prompt call is expected to reject.
 */
async function runPromptAndCaptureRequest(
  options: {
    cwd?: string;
    prefixCwd?: boolean;
    provenanceMode?: "meta" | "meta+receipt";
  } = {},
) {
  const store = createInMemorySessionStore();
  const sessionCwd = options.cwd ?? path.join(os.homedir(), "openclaw-test");
  store.createSession({
    sessionId: TEST_SESSION_ID,
    sessionKey: TEST_SESSION_KEY,
    cwd: sessionCwd,
  });
  const spy = createStopAfterSendSpy();
  const gateway = createAcpGateway(spy as unknown as GatewayClient["request"]);
  const agent = new AcpGatewayAgent(createAcpConnection(), gateway, {
    sessionStore: store,
    prefixCwd: options.prefixCwd,
    provenanceMode: options.provenanceMode,
  });
  await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send");
  return spy;
}
async function runPromptWithCwd(cwd: string) {
const pinnedHome = os.homedir();
const previousOpenClawHome = process.env.OPENCLAW_HOME;
@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => {
delete process.env.OPENCLAW_HOME;
process.env.HOME = pinnedHome;
const sessionStore = createInMemorySessionStore();
sessionStore.createSession({
sessionId: "session-1",
sessionKey: "agent:main:main",
cwd,
});
const requestSpy = vi.fn(async (method: string) => {
if (method === "chat.send") {
throw new Error("stop-after-send");
}
return {};
});
const agent = new AcpGatewayAgent(
createAcpConnection(),
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
{
sessionStore,
prefixCwd: true,
},
);
try {
await expect(
agent.prompt({
sessionId: "session-1",
prompt: [{ type: "text", text: "hello" }],
_meta: {},
} as unknown as PromptRequest),
).rejects.toThrow("stop-after-send");
return requestSpy;
return await runPromptAndCaptureRequest({ cwd, prefixCwd: true });
} finally {
if (previousOpenClawHome === undefined) {
delete process.env.OPENCLAW_HOME;
@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => {
});
it("injects system provenance metadata when enabled", async () => {
const sessionStore = createInMemorySessionStore();
sessionStore.createSession({
sessionId: "session-1",
sessionKey: "agent:main:main",
cwd: path.join(os.homedir(), "openclaw-test"),
});
const requestSpy = vi.fn(async (method: string) => {
if (method === "chat.send") {
throw new Error("stop-after-send");
}
return {};
});
const agent = new AcpGatewayAgent(
createAcpConnection(),
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
{
sessionStore,
provenanceMode: "meta",
},
);
await expect(
agent.prompt({
sessionId: "session-1",
prompt: [{ type: "text", text: "hello" }],
_meta: {},
} as unknown as PromptRequest),
).rejects.toThrow("stop-after-send");
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" });
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemInputProvenance: {
kind: "external_user",
originSessionId: "session-1",
originSessionId: TEST_SESSION_ID,
sourceChannel: "acp",
sourceTool: "openclaw_acp",
},
@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => {
});
it("injects a system provenance receipt when requested", async () => {
const sessionStore = createInMemorySessionStore();
sessionStore.createSession({
sessionId: "session-1",
sessionKey: "agent:main:main",
cwd: path.join(os.homedir(), "openclaw-test"),
});
const requestSpy = vi.fn(async (method: string) => {
if (method === "chat.send") {
throw new Error("stop-after-send");
}
return {};
});
const agent = new AcpGatewayAgent(
createAcpConnection(),
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
{
sessionStore,
provenanceMode: "meta+receipt",
},
);
await expect(
agent.prompt({
sessionId: "session-1",
prompt: [{ type: "text", text: "hello" }],
_meta: {},
} as unknown as PromptRequest),
).rejects.toThrow("stop-after-send");
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" });
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemInputProvenance: {
kind: "external_user",
originSessionId: "session-1",
originSessionId: TEST_SESSION_ID,
sourceChannel: "acp",
sourceTool: "openclaw_acp",
},
@ -182,14 +140,14 @@ describe("acp prompt cwd prefix", () => {
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"),
systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`),
}),
{ expectFinal: true },
);
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"),
systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`),
}),
{ expectFinal: true },
);

View File

@ -1,10 +1,5 @@
import type { AgentToolResult } from "@mariozechner/pi-agent-core";
import { loadConfig } from "../config/config.js";
import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js";
import {
hasConfiguredExecApprovalDmRoute,
resolveExecApprovalInitiatingSurfaceState,
} from "../infra/exec-approval-surface.js";
import {
addAllowlistEntry,
type ExecAsk,
@ -26,7 +21,7 @@ import {
registerExecApprovalRequestForHostOrThrow,
} from "./bash-tools.exec-approval-request.js";
import {
createDefaultExecApprovalRequestContext,
createAndRegisterDefaultExecApprovalRequest,
resolveBaseExecApprovalDecision,
resolveApprovalDecisionOrUndefined,
resolveExecHostApprovalContext,
@ -149,52 +144,36 @@ export async function processGatewayAllowlist(
approvalId,
approvalSlug,
warningText,
expiresAtMs: defaultExpiresAtMs,
preResolvedDecision: defaultPreResolvedDecision,
} = createDefaultExecApprovalRequestContext({
expiresAtMs,
preResolvedDecision,
initiatingSurface,
sentApproverDms,
unavailableReason,
} = await createAndRegisterDefaultExecApprovalRequest({
warnings: params.warnings,
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
createApprovalSlug,
turnSourceChannel: params.turnSourceChannel,
turnSourceAccountId: params.turnSourceAccountId,
register: async (approvalId) =>
await registerExecApprovalRequestForHostOrThrow({
approvalId,
command: params.command,
workdir: params.workdir,
host: "gateway",
security: hostSecurity,
ask: hostAsk,
...buildExecApprovalRequesterContext({
agentId: params.agentId,
sessionKey: params.sessionKey,
}),
resolvedPath: allowlistEval.segments[0]?.resolution?.resolvedPath,
...buildExecApprovalTurnSourceContext(params),
}),
});
const resolvedPath = allowlistEval.segments[0]?.resolution?.resolvedPath;
const effectiveTimeout =
typeof params.timeoutSec === "number" ? params.timeoutSec : params.defaultTimeoutSec;
let expiresAtMs = defaultExpiresAtMs;
let preResolvedDecision = defaultPreResolvedDecision;
// Register first so the returned approval ID is actionable immediately.
const registration = await registerExecApprovalRequestForHostOrThrow({
approvalId,
command: params.command,
workdir: params.workdir,
host: "gateway",
security: hostSecurity,
ask: hostAsk,
...buildExecApprovalRequesterContext({
agentId: params.agentId,
sessionKey: params.sessionKey,
}),
resolvedPath,
...buildExecApprovalTurnSourceContext(params),
});
expiresAtMs = registration.expiresAtMs;
preResolvedDecision = registration.finalDecision;
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
channel: params.turnSourceChannel,
accountId: params.turnSourceAccountId,
});
const cfg = loadConfig();
const sentApproverDms =
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
hasConfiguredExecApprovalDmRoute(cfg);
const unavailableReason =
preResolvedDecision === null
? "no-approval-route"
: initiatingSurface.kind === "disabled"
? "initiating-platform-disabled"
: initiatingSurface.kind === "unsupported"
? "initiating-platform-unsupported"
: null;
void (async () => {
const decision = await resolveApprovalDecisionOrUndefined({

View File

@ -1,11 +1,6 @@
import crypto from "node:crypto";
import type { AgentToolResult } from "@mariozechner/pi-agent-core";
import { loadConfig } from "../config/config.js";
import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js";
import {
hasConfiguredExecApprovalDmRoute,
resolveExecApprovalInitiatingSurfaceState,
} from "../infra/exec-approval-surface.js";
import {
type ExecApprovalsFile,
type ExecAsk,
@ -25,7 +20,7 @@ import {
registerExecApprovalRequestForHostOrThrow,
} from "./bash-tools.exec-approval-request.js";
import {
createDefaultExecApprovalRequestContext,
createAndRegisterDefaultExecApprovalRequest,
resolveBaseExecApprovalDecision,
resolveApprovalDecisionOrUndefined,
resolveExecHostApprovalContext,
@ -225,50 +220,34 @@ export async function executeNodeHostCommand(
approvalId,
approvalSlug,
warningText,
expiresAtMs: defaultExpiresAtMs,
preResolvedDecision: defaultPreResolvedDecision,
} = createDefaultExecApprovalRequestContext({
expiresAtMs,
preResolvedDecision,
initiatingSurface,
sentApproverDms,
unavailableReason,
} = await createAndRegisterDefaultExecApprovalRequest({
warnings: params.warnings,
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
createApprovalSlug,
turnSourceChannel: params.turnSourceChannel,
turnSourceAccountId: params.turnSourceAccountId,
register: async (approvalId) =>
await registerExecApprovalRequestForHostOrThrow({
approvalId,
systemRunPlan: prepared.plan,
env: nodeEnv,
workdir: runCwd,
host: "node",
nodeId,
security: hostSecurity,
ask: hostAsk,
...buildExecApprovalRequesterContext({
agentId: runAgentId,
sessionKey: runSessionKey,
}),
...buildExecApprovalTurnSourceContext(params),
}),
});
let expiresAtMs = defaultExpiresAtMs;
let preResolvedDecision = defaultPreResolvedDecision;
// Register first so the returned approval ID is actionable immediately.
const registration = await registerExecApprovalRequestForHostOrThrow({
approvalId,
systemRunPlan: prepared.plan,
env: nodeEnv,
workdir: runCwd,
host: "node",
nodeId,
security: hostSecurity,
ask: hostAsk,
...buildExecApprovalRequesterContext({
agentId: runAgentId,
sessionKey: runSessionKey,
}),
...buildExecApprovalTurnSourceContext(params),
});
expiresAtMs = registration.expiresAtMs;
preResolvedDecision = registration.finalDecision;
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
channel: params.turnSourceChannel,
accountId: params.turnSourceAccountId,
});
const cfg = loadConfig();
const sentApproverDms =
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
hasConfiguredExecApprovalDmRoute(cfg);
const unavailableReason =
preResolvedDecision === null
? "no-approval-route"
: initiatingSurface.kind === "disabled"
? "initiating-platform-disabled"
: initiatingSurface.kind === "unsupported"
? "initiating-platform-unsupported"
: null;
void (async () => {
const decision = await resolveApprovalDecisionOrUndefined({

View File

@ -1,4 +1,10 @@
import crypto from "node:crypto";
import { loadConfig } from "../config/config.js";
import {
hasConfiguredExecApprovalDmRoute,
type ExecApprovalInitiatingSurfaceState,
resolveExecApprovalInitiatingSurfaceState,
} from "../infra/exec-approval-surface.js";
import {
maxAsk,
minSecurity,
@ -6,7 +12,10 @@ import {
type ExecAsk,
type ExecSecurity,
} from "../infra/exec-approvals.js";
import { resolveRegisteredExecApprovalDecision } from "./bash-tools.exec-approval-request.js";
import {
type ExecApprovalRegistration,
resolveRegisteredExecApprovalDecision,
} from "./bash-tools.exec-approval-request.js";
import { DEFAULT_APPROVAL_TIMEOUT_MS } from "./bash-tools.exec-runtime.js";
type ResolvedExecApprovals = ReturnType<typeof resolveExecApprovals>;
@ -28,6 +37,22 @@ export type ExecApprovalRequestState = ExecApprovalPendingState & {
noticeSeconds: number;
};
// Why an exec approval could not be completed interactively on the
// initiating surface.
export type ExecApprovalUnavailableReason =
  | "no-approval-route"
  | "initiating-platform-disabled"
  | "initiating-platform-unsupported";

// Result of creating and registering a default exec-approval request.
export type RegisteredExecApprovalRequestContext = {
  approvalId: string;
  // Short handle used in `/approve <slug> ...` replies.
  approvalSlug: string;
  warningText: string;
  expiresAtMs: number;
  // null maps to "no-approval-route" downstream; undefined presumably means
  // no final decision was pre-resolved at registration time — confirm.
  preResolvedDecision: string | null | undefined;
  initiatingSurface: ExecApprovalInitiatingSurfaceState;
  sentApproverDms: boolean;
  unavailableReason: ExecApprovalUnavailableReason | null;
};
export function createExecApprovalPendingState(params: {
warnings: string[];
timeoutMs: number;
@ -158,3 +183,77 @@ export async function resolveApprovalDecisionOrUndefined(params: {
return undefined;
}
}
/**
 * Derives the "approval unavailable" state for an exec request: which surface
 * initiated it, whether approver DMs were used as a fallback route, and the
 * reason (if any) the approval cannot be completed on the initiating surface.
 */
export function resolveExecApprovalUnavailableState(params: {
  turnSourceChannel?: string;
  turnSourceAccountId?: string;
  preResolvedDecision: string | null | undefined;
}): {
  initiatingSurface: ExecApprovalInitiatingSurfaceState;
  sentApproverDms: boolean;
  unavailableReason: ExecApprovalUnavailableReason | null;
} {
  const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
    channel: params.turnSourceChannel,
    accountId: params.turnSourceAccountId,
  });
  const surfaceBlocked =
    initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported";
  // Approver DMs only come into play when the initiating surface cannot host
  // the approval and a DM route is configured.
  const sentApproverDms = surfaceBlocked && hasConfiguredExecApprovalDmRoute(loadConfig());
  let unavailableReason: ExecApprovalUnavailableReason | null = null;
  if (params.preResolvedDecision === null) {
    unavailableReason = "no-approval-route";
  } else if (initiatingSurface.kind === "disabled") {
    unavailableReason = "initiating-platform-disabled";
  } else if (initiatingSurface.kind === "unsupported") {
    unavailableReason = "initiating-platform-unsupported";
  }
  return { initiatingSurface, sentApproverDms, unavailableReason };
}
/**
 * Creates the default exec-approval request context, registers the approval
 * with the host (register-first so the returned ID is immediately
 * actionable), and resolves the availability state for the initiating
 * surface.
 *
 * Registration results override the defaults: `expiresAtMs` when the host
 * supplies one, and `preResolvedDecision` whenever `finalDecision` is set
 * (including an explicit null).
 */
export async function createAndRegisterDefaultExecApprovalRequest(params: {
  warnings: string[];
  approvalRunningNoticeMs: number;
  createApprovalSlug: (approvalId: string) => string;
  turnSourceChannel?: string;
  turnSourceAccountId?: string;
  register: (approvalId: string) => Promise<ExecApprovalRegistration>;
}): Promise<RegisteredExecApprovalRequestContext> {
  const defaults = createDefaultExecApprovalRequestContext({
    warnings: params.warnings,
    approvalRunningNoticeMs: params.approvalRunningNoticeMs,
    createApprovalSlug: params.createApprovalSlug,
  });
  const registration = await params.register(defaults.approvalId);
  const preResolvedDecision = registration.finalDecision;
  const availability = resolveExecApprovalUnavailableState({
    turnSourceChannel: params.turnSourceChannel,
    turnSourceAccountId: params.turnSourceAccountId,
    preResolvedDecision,
  });
  return {
    approvalId: defaults.approvalId,
    approvalSlug: defaults.approvalSlug,
    warningText: defaults.warningText,
    expiresAtMs: registration.expiresAtMs ?? defaults.expiresAtMs,
    preResolvedDecision:
      preResolvedDecision === undefined ? defaults.preResolvedDecision : preResolvedDecision,
    initiatingSurface: availability.initiatingSurface,
    sentApproverDms: availability.sentApproverDms,
    unavailableReason: availability.unavailableReason,
  };
}

View File

@ -43,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) {
return buildSystemRunPreparePayload(params);
}
// Path of the openclaw.json config file under the test HOME directory.
function getTestConfigPath() {
  const home = process.env.HOME ?? "";
  return path.join(home, ".openclaw", "openclaw.json");
}
// Writes `config` as the test openclaw.json, creating parent directories as
// needed. `pretty` switches between compact and 2-space-indented JSON.
async function writeOpenClawConfig(config: Record<string, unknown>, pretty = false) {
  const target = getTestConfigPath();
  await fs.mkdir(path.dirname(target), { recursive: true });
  const indent = pretty ? 2 : undefined;
  await fs.writeFile(target, JSON.stringify(config, null, indent));
}
// Persists the exec-approvals fixture under the test HOME directory.
async function writeExecApprovalsConfig(config: Record<string, unknown>) {
  const home = process.env.HOME ?? "";
  const target = path.join(home, ".openclaw", "exec-approvals.json");
  await fs.mkdir(path.dirname(target), { recursive: true });
  await fs.writeFile(target, JSON.stringify(config, null, 2));
}
// Shapes a gateway "accepted" approval response, echoing the request id
// back when present.
function acceptedApprovalResponse(params: unknown) {
  const id = (params as { id?: string } | undefined)?.id;
  return { status: "accepted", id };
}
// Returns the first "text" content part of a tool result, or "" when the
// result has no text part (or the part carries no text).
function getResultText(result: { content: Array<{ type?: string; text?: string }> }) {
  for (const part of result.content) {
    if (part.type === "text") {
      return part.text ?? "";
    }
  }
  return "";
}
/**
 * Asserts the tool result is a pending approval and that its text includes
 * the /approve instructions, host/node/cwd context, and the command block.
 * Returns the approval id/slug details for follow-up assertions.
 */
function expectPendingApprovalText(
  result: {
    details: { status?: string };
    content: Array<{ type?: string; text?: string }>;
  },
  options: {
    command: string;
    host: "gateway" | "node";
    nodeId?: string;
    interactive?: boolean;
  },
) {
  expect(result.details.status).toBe("approval-pending");
  const details = result.details as { approvalId: string; approvalSlug: string };
  const text = getResultText(result);
  const fragments = [
    `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
    `full ${details.approvalId}`,
    `Host: ${options.host}`,
  ];
  if (options.nodeId) {
    fragments.push(`Node: ${options.nodeId}`);
  }
  fragments.push(`CWD: ${process.cwd()}`, "Command:\n```sh\n", options.command);
  if (options.interactive) {
    fragments.push(
      "Mode: foreground (interactive approvals available).",
      "Background mode requires pre-approved policy",
    );
  }
  for (const fragment of fragments) {
    expect(text).toContain(fragment);
  }
  return details;
}
// Asserts a pending approval whose text shows `command` inside an sh code
// fence.
function expectPendingCommandText(
  result: {
    details: { status?: string };
    content: Array<{ type?: string; text?: string }>;
  },
  command: string,
) {
  expect(result.details.status).toBe("approval-pending");
  const pendingText = getResultText(result);
  for (const fragment of ["Command:\n```sh\n", command]) {
    expect(pendingText).toContain(fragment);
  }
}
// Stubs callGatewayTool to record every invoked method name into `calls`
// and reply with a generic { ok: true }.
function mockGatewayOkCalls(calls: string[]) {
  vi.mocked(callGatewayTool).mockImplementation(async (method) => {
    const response = { ok: true };
    calls.push(method);
    return response;
  });
}
// Exec tool fixture: allowlist security, asks on allowlist miss, elevation
// enabled and allowed (defaulting to "ask"), and no running-notice delay.
function createElevatedAllowlistExecTool() {
  return createExecTool({
    security: "allowlist",
    ask: "on-miss",
    elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
    approvalRunningNoticeMs: 0,
  });
}
/**
 * Runs a gateway exec with "full" security under the given approvals config
 * and asserts it completes without ever hitting the approval
 * request/waitDecision RPCs.
 */
async function expectGatewayExecWithoutApproval(options: {
  config: Record<string, unknown>;
  command: string;
  ask?: "always" | "on-miss" | "off";
}) {
  await writeExecApprovalsConfig(options.config);
  const recordedCalls: string[] = [];
  mockGatewayOkCalls(recordedCalls);
  const tool = createExecTool({
    host: "gateway",
    ask: options.ask,
    security: "full",
    approvalRunningNoticeMs: 0,
  });
  const result = await tool.execute("call-no-approval", { command: options.command });
  expect(result.details.status).toBe("completed");
  for (const approvalMethod of ["exec.approval.request", "exec.approval.waitDecision"]) {
    expect(recordedCalls).not.toContain(approvalMethod);
  }
}
/**
 * Stubs callGatewayTool for the happy-path approval flow: approval requests
 * are accepted, waitDecision yields "allow-once", and optional hooks observe
 * "agent" and "node.invoke" traffic. Any other method replies { ok: true }.
 */
function mockAcceptedApprovalFlow(options: {
  onAgent?: (params: Record<string, unknown>) => void;
  onNodeInvoke?: (params: unknown) => unknown;
}) {
  vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
    switch (method) {
      case "exec.approval.request":
        return acceptedApprovalResponse(params);
      case "exec.approval.waitDecision":
        return { decision: "allow-once" };
      case "agent":
        if (options.onAgent) {
          options.onAgent(params as Record<string, unknown>);
          return { status: "ok" };
        }
        break;
      case "node.invoke":
        if (options.onNodeInvoke) {
          return await options.onNodeInvoke(params);
        }
        break;
    }
    return { ok: true };
  });
}
/**
 * Stubs the gateway so an approval registers successfully ("approval-id") but
 * never resolves: waitDecision always reports that no decision exists yet.
 */
function mockPendingApprovalRegistration() {
  vi.mocked(callGatewayTool).mockImplementation(async (method) => {
    if (method === "exec.approval.waitDecision") {
      return { decision: null };
    }
    if (method === "exec.approval.request") {
      return { status: "accepted", id: "approval-id" };
    }
    return { ok: true };
  });
}
function expectApprovalUnavailableText(result: {
details: { status?: string };
content: Array<{ type?: string; text?: string }>;
}) {
expect(result.details.status).toBe("approval-unavailable");
const text = result.content.find((part) => part.type === "text")?.text ?? "";
expect(text).not.toContain("/approve");
expect(text).not.toContain("npm view diver name version description");
expect(text).not.toContain("Pending command:");
expect(text).not.toContain("Host:");
expect(text).not.toContain("CWD:");
return text;
}
describe("exec approvals", () => {
let previousHome: string | undefined;
let previousUserProfile: string | undefined;
@ -81,18 +237,11 @@ describe("exec approvals", () => {
let invokeParams: unknown;
let agentParams: unknown;
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: (params as { id?: string })?.id };
}
if (method === "exec.approval.waitDecision") {
return { decision: "allow-once" };
}
if (method === "agent") {
mockAcceptedApprovalFlow({
onAgent: (params) => {
agentParams = params;
return { status: "ok" };
}
if (method === "node.invoke") {
},
onNodeInvoke: (params) => {
const invoke = params as { command?: string };
if (invoke.command === "system.run.prepare") {
return buildPreparedSystemRunPayload(params);
@ -101,8 +250,7 @@ describe("exec approvals", () => {
invokeParams = params;
return { payload: { success: true, stdout: "ok" } };
}
}
return { ok: true };
},
});
const tool = createExecTool({
@ -113,19 +261,12 @@ describe("exec approvals", () => {
});
const result = await tool.execute("call1", { command: "ls -la" });
expect(result.details.status).toBe("approval-pending");
const details = result.details as { approvalId: string; approvalSlug: string };
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
);
expect(pendingText).toContain(`full ${details.approvalId}`);
expect(pendingText).toContain("Host: node");
expect(pendingText).toContain("Node: node-1");
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
expect(pendingText).toContain("Command:\n```sh\nls -la\n```");
expect(pendingText).toContain("Mode: foreground (interactive approvals available).");
expect(pendingText).toContain("Background mode requires pre-approved policy");
const details = expectPendingApprovalText(result, {
command: "ls -la",
host: "node",
nodeId: "node-1",
interactive: true,
});
const approvalId = details.approvalId;
await expect
@ -214,74 +355,28 @@ describe("exec approvals", () => {
});
it("uses exec-approvals ask=off to suppress gateway prompts", async () => {
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
await fs.writeFile(
approvalsPath,
JSON.stringify(
{
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {
main: { security: "full", ask: "off", askFallback: "full" },
},
await expectGatewayExecWithoutApproval({
config: {
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {
main: { security: "full", ask: "off", askFallback: "full" },
},
null,
2,
),
);
const calls: string[] = [];
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
calls.push(method);
return { ok: true };
});
const tool = createExecTool({
host: "gateway",
},
command: "echo ok",
ask: "on-miss",
security: "full",
approvalRunningNoticeMs: 0,
});
const result = await tool.execute("call3b", { command: "echo ok" });
expect(result.details.status).toBe("completed");
expect(calls).not.toContain("exec.approval.request");
expect(calls).not.toContain("exec.approval.waitDecision");
});
it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => {
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
await fs.writeFile(
approvalsPath,
JSON.stringify(
{
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {},
},
null,
2,
),
);
const calls: string[] = [];
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
calls.push(method);
return { ok: true };
await expectGatewayExecWithoutApproval({
config: {
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {},
},
command: "echo ok",
});
const tool = createExecTool({
host: "gateway",
security: "full",
approvalRunningNoticeMs: 0,
});
const result = await tool.execute("call3c", { command: "echo ok" });
expect(result.details.status).toBe("completed");
expect(calls).not.toContain("exec.approval.request");
expect(calls).not.toContain("exec.approval.waitDecision");
});
it("requires approval for elevated ask when allowlist misses", async () => {
@ -296,7 +391,7 @@ describe("exec approvals", () => {
if (method === "exec.approval.request") {
resolveApproval?.();
// Return registration confirmation
return { status: "accepted", id: (params as { id?: string })?.id };
return acceptedApprovalResponse(params);
}
if (method === "exec.approval.waitDecision") {
return { decision: "deny" };
@ -304,24 +399,10 @@ describe("exec approvals", () => {
return { ok: true };
});
const tool = createExecTool({
ask: "on-miss",
security: "allowlist",
approvalRunningNoticeMs: 0,
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
});
const tool = createElevatedAllowlistExecTool();
const result = await tool.execute("call4", { command: "echo ok", elevated: true });
expect(result.details.status).toBe("approval-pending");
const details = result.details as { approvalId: string; approvalSlug: string };
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
);
expect(pendingText).toContain(`full ${details.approvalId}`);
expect(pendingText).toContain("Host: gateway");
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
expect(pendingText).toContain("Command:\n```sh\necho ok\n```");
expectPendingApprovalText(result, { command: "echo ok", host: "gateway" });
await approvalSeen;
expect(calls).toContain("exec.approval.request");
expect(calls).toContain("exec.approval.waitDecision");
@ -330,18 +411,10 @@ describe("exec approvals", () => {
it("starts a direct agent follow-up after approved gateway exec completes", async () => {
const agentCalls: Array<Record<string, unknown>> = [];
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: (params as { id?: string })?.id };
}
if (method === "exec.approval.waitDecision") {
return { decision: "allow-once" };
}
if (method === "agent") {
agentCalls.push(params as Record<string, unknown>);
return { status: "ok" };
}
return { ok: true };
mockAcceptedApprovalFlow({
onAgent: (params) => {
agentCalls.push(params);
},
});
const tool = createExecTool({
@ -388,7 +461,7 @@ describe("exec approvals", () => {
if (typeof request.id === "string") {
requestIds.push(request.id);
}
return { status: "accepted", id: request.id };
return acceptedApprovalResponse(request);
}
if (method === "exec.approval.waitDecision") {
const wait = params as { id?: string };
@ -400,12 +473,7 @@ describe("exec approvals", () => {
return { ok: true };
});
const tool = createExecTool({
ask: "on-miss",
security: "allowlist",
approvalRunningNoticeMs: 0,
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
});
const tool = createElevatedAllowlistExecTool();
const first = await tool.execute("call-seq-1", {
command: "npm view diver --json",
@ -429,7 +497,7 @@ describe("exec approvals", () => {
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
calls.push(method);
if (method === "exec.approval.request") {
return { status: "accepted", id: (params as { id?: string })?.id };
return acceptedApprovalResponse(params);
}
if (method === "exec.approval.waitDecision") {
return { decision: "deny" };
@ -448,11 +516,7 @@ describe("exec approvals", () => {
command: "npm view diver --json | jq .name && brew outdated",
});
expect(result.details.status).toBe("approval-pending");
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
);
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
expect(calls).toContain("exec.approval.request");
});
@ -480,11 +544,7 @@ describe("exec approvals", () => {
command: "npm view diver --json | jq .name && brew outdated",
});
expect(result.details.status).toBe("approval-pending");
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
);
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
expect(calls).toContain("exec.approval.request");
});
@ -551,30 +611,17 @@ describe("exec approvals", () => {
});
it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => {
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
await fs.mkdir(path.dirname(configPath), { recursive: true });
await fs.writeFile(
configPath,
JSON.stringify({
channels: {
discord: {
enabled: true,
execApprovals: { enabled: false },
},
await writeOpenClawConfig({
channels: {
discord: {
enabled: true,
execApprovals: { enabled: false },
},
}),
);
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: "approval-id" };
}
if (method === "exec.approval.waitDecision") {
return { decision: null };
}
return { ok: true };
},
});
mockPendingApprovalRegistration();
const tool = createExecTool({
host: "gateway",
ask: "always",
@ -588,49 +635,29 @@ describe("exec approvals", () => {
command: "npm view diver name version description",
});
expect(result.details.status).toBe("approval-unavailable");
const text = result.content.find((part) => part.type === "text")?.text ?? "";
const text = expectApprovalUnavailableText(result);
expect(text).toContain("chat exec approvals are not enabled on Discord");
expect(text).toContain("Web UI or terminal UI");
expect(text).not.toContain("/approve");
expect(text).not.toContain("npm view diver name version description");
expect(text).not.toContain("Pending command:");
expect(text).not.toContain("Host:");
expect(text).not.toContain("CWD:");
});
it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => {
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
await fs.mkdir(path.dirname(configPath), { recursive: true });
await fs.writeFile(
configPath,
JSON.stringify(
{
channels: {
telegram: {
enabled: true,
execApprovals: { enabled: false },
},
discord: {
enabled: true,
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
},
await writeOpenClawConfig(
{
channels: {
telegram: {
enabled: true,
execApprovals: { enabled: false },
},
discord: {
enabled: true,
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
},
},
null,
2,
),
},
true,
);
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: "approval-id" };
}
if (method === "exec.approval.waitDecision") {
return { decision: null };
}
return { ok: true };
});
mockPendingApprovalRegistration();
const tool = createExecTool({
host: "gateway",
@ -645,14 +672,8 @@ describe("exec approvals", () => {
command: "npm view diver name version description",
});
expect(result.details.status).toBe("approval-unavailable");
const text = result.content.find((part) => part.type === "text")?.text ?? "";
const text = expectApprovalUnavailableText(result);
expect(text).toContain("Approval required. I sent the allowed approvers DMs.");
expect(text).not.toContain("/approve");
expect(text).not.toContain("npm view diver name version description");
expect(text).not.toContain("Pending command:");
expect(text).not.toContain("Host:");
expect(text).not.toContain("CWD:");
});
it("denies node obfuscated command when approval request times out", async () => {

View File

@ -46,6 +46,20 @@ function expectFallbackUsed(
expect(result.attempts[0]?.reason).toBe("rate_limit");
}
/**
 * Asserts that the overall result is "ok", that the run callback executed
 * exactly once with the anthropic haiku model, and that the first attempt
 * recorded the given skip `reason`.
 */
function expectPrimarySkippedForReason(
  result: { result: unknown; attempts: Array<{ reason?: string }> },
  run: {
    (...args: unknown[]): unknown;
    mock: { calls: unknown[][] };
  },
  reason: string,
) {
  // Assertions are independent; ordered here from attempt metadata outward.
  expect(result.attempts[0]?.reason).toBe(reason);
  expect(result.result).toBe("ok");
  expect(run).toHaveBeenCalledTimes(1);
  expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
}
function expectPrimaryProbeSuccess(
result: { result: unknown },
run: {
@ -183,11 +197,7 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok");
const result = await runPrimaryCandidate(cfg, run);
expect(result.result).toBe("ok");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
expect(result.attempts[0]?.reason).toBe("billing");
expectPrimarySkippedForReason(result, run, "billing");
});
it("probes primary model when within 2-min margin of cooldown expiry", async () => {
@ -540,10 +550,6 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok");
const result = await runPrimaryCandidate(cfg, run);
expect(result.result).toBe("ok");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
expect(result.attempts[0]?.reason).toBe("billing");
expectPrimarySkippedForReason(result, run, "billing");
});
});

View File

@ -80,131 +80,121 @@ describe("model-selection", () => {
});
describe("parseModelRef", () => {
it("should parse full model refs", () => {
expect(parseModelRef("anthropic/claude-3-5-sonnet", "openai")).toEqual({
provider: "anthropic",
model: "claude-3-5-sonnet",
});
const expectParsedModelVariants = (
variants: string[],
defaultProvider: string,
expected: { provider: string; model: string },
) => {
for (const raw of variants) {
expect(parseModelRef(raw, defaultProvider), raw).toEqual(expected);
}
};
it.each([
{
name: "parses explicit provider/model refs",
variants: ["anthropic/claude-3-5-sonnet"],
defaultProvider: "openai",
expected: { provider: "anthropic", model: "claude-3-5-sonnet" },
},
{
name: "uses the default provider when omitted",
variants: ["claude-3-5-sonnet"],
defaultProvider: "anthropic",
expected: { provider: "anthropic", model: "claude-3-5-sonnet" },
},
{
name: "preserves nested model ids after the provider prefix",
variants: ["nvidia/moonshotai/kimi-k2.5"],
defaultProvider: "anthropic",
expected: { provider: "nvidia", model: "moonshotai/kimi-k2.5" },
},
{
name: "normalizes anthropic shorthand aliases",
variants: ["anthropic/opus-4.6", "opus-4.6", " anthropic / opus-4.6 "],
defaultProvider: "anthropic",
expected: { provider: "anthropic", model: "claude-opus-4-6" },
},
{
name: "normalizes anthropic sonnet aliases",
variants: ["anthropic/sonnet-4.6", "sonnet-4.6"],
defaultProvider: "anthropic",
expected: { provider: "anthropic", model: "claude-sonnet-4-6" },
},
{
name: "normalizes deprecated google flash preview ids",
variants: ["google/gemini-3.1-flash-preview", "gemini-3.1-flash-preview"],
defaultProvider: "google",
expected: { provider: "google", model: "gemini-3-flash-preview" },
},
{
name: "normalizes gemini 3.1 flash-lite ids",
variants: ["google/gemini-3.1-flash-lite", "gemini-3.1-flash-lite"],
defaultProvider: "google",
expected: { provider: "google", model: "gemini-3.1-flash-lite-preview" },
},
{
name: "keeps OpenAI codex refs on the openai provider",
variants: ["openai/gpt-5.3-codex", "gpt-5.3-codex"],
defaultProvider: "openai",
expected: { provider: "openai", model: "gpt-5.3-codex" },
},
{
name: "preserves openrouter native model prefixes",
variants: ["openrouter/aurora-alpha"],
defaultProvider: "openai",
expected: { provider: "openrouter", model: "openrouter/aurora-alpha" },
},
{
name: "passes through openrouter upstream provider ids",
variants: ["openrouter/anthropic/claude-sonnet-4-5"],
defaultProvider: "openai",
expected: { provider: "openrouter", model: "anthropic/claude-sonnet-4-5" },
},
{
name: "normalizes Vercel Claude shorthand to anthropic-prefixed model ids",
variants: ["vercel-ai-gateway/claude-opus-4.6"],
defaultProvider: "openai",
expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" },
},
{
name: "normalizes Vercel Anthropic aliases without double-prefixing",
variants: ["vercel-ai-gateway/opus-4.6"],
defaultProvider: "openai",
expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4-6" },
},
{
name: "keeps already-prefixed Vercel Anthropic models unchanged",
variants: ["vercel-ai-gateway/anthropic/claude-opus-4.6"],
defaultProvider: "openai",
expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" },
},
{
name: "passes through non-Claude Vercel model ids unchanged",
variants: ["vercel-ai-gateway/openai/gpt-5.2"],
defaultProvider: "openai",
expected: { provider: "vercel-ai-gateway", model: "openai/gpt-5.2" },
},
{
name: "keeps already-suffixed codex variants unchanged",
variants: ["openai/gpt-5.3-codex-codex"],
defaultProvider: "anthropic",
expected: { provider: "openai", model: "gpt-5.3-codex-codex" },
},
])("$name", ({ variants, defaultProvider, expected }) => {
expectParsedModelVariants(variants, defaultProvider, expected);
});
it("preserves nested model ids after provider prefix", () => {
expect(parseModelRef("nvidia/moonshotai/kimi-k2.5", "anthropic")).toEqual({
provider: "nvidia",
model: "moonshotai/kimi-k2.5",
});
it("round-trips normalized refs through modelKey", () => {
const parsed = parseModelRef(" opus-4.6 ", "anthropic");
expect(parsed).toEqual({ provider: "anthropic", model: "claude-opus-4-6" });
expect(modelKey(parsed?.provider ?? "", parsed?.model ?? "")).toBe(
"anthropic/claude-opus-4-6",
);
});
it("normalizes anthropic alias refs to canonical model ids", () => {
expect(parseModelRef("anthropic/opus-4.6", "openai")).toEqual({
provider: "anthropic",
model: "claude-opus-4-6",
});
expect(parseModelRef("opus-4.6", "anthropic")).toEqual({
provider: "anthropic",
model: "claude-opus-4-6",
});
expect(parseModelRef("anthropic/sonnet-4.6", "openai")).toEqual({
provider: "anthropic",
model: "claude-sonnet-4-6",
});
expect(parseModelRef("sonnet-4.6", "anthropic")).toEqual({
provider: "anthropic",
model: "claude-sonnet-4-6",
});
});
it("should use default provider if none specified", () => {
expect(parseModelRef("claude-3-5-sonnet", "anthropic")).toEqual({
provider: "anthropic",
model: "claude-3-5-sonnet",
});
});
it("normalizes deprecated google flash preview ids to the working model id", () => {
expect(parseModelRef("google/gemini-3.1-flash-preview", "openai")).toEqual({
provider: "google",
model: "gemini-3-flash-preview",
});
expect(parseModelRef("gemini-3.1-flash-preview", "google")).toEqual({
provider: "google",
model: "gemini-3-flash-preview",
});
});
it("normalizes gemini 3.1 flash-lite to the preview model id", () => {
expect(parseModelRef("google/gemini-3.1-flash-lite", "openai")).toEqual({
provider: "google",
model: "gemini-3.1-flash-lite-preview",
});
expect(parseModelRef("gemini-3.1-flash-lite", "google")).toEqual({
provider: "google",
model: "gemini-3.1-flash-lite-preview",
});
});
it("keeps openai gpt-5.3 codex refs on the openai provider", () => {
expect(parseModelRef("openai/gpt-5.3-codex", "anthropic")).toEqual({
provider: "openai",
model: "gpt-5.3-codex",
});
expect(parseModelRef("gpt-5.3-codex", "openai")).toEqual({
provider: "openai",
model: "gpt-5.3-codex",
});
expect(parseModelRef("openai/gpt-5.3-codex-codex", "anthropic")).toEqual({
provider: "openai",
model: "gpt-5.3-codex-codex",
});
});
it("should return null for empty strings", () => {
expect(parseModelRef("", "anthropic")).toBeNull();
expect(parseModelRef(" ", "anthropic")).toBeNull();
});
it("should preserve openrouter/ prefix for native models", () => {
expect(parseModelRef("openrouter/aurora-alpha", "openai")).toEqual({
provider: "openrouter",
model: "openrouter/aurora-alpha",
});
});
it("should pass through openrouter external provider models as-is", () => {
expect(parseModelRef("openrouter/anthropic/claude-sonnet-4-5", "openai")).toEqual({
provider: "openrouter",
model: "anthropic/claude-sonnet-4-5",
});
});
it("normalizes Vercel Claude shorthand to anthropic-prefixed model ids", () => {
expect(parseModelRef("vercel-ai-gateway/claude-opus-4.6", "openai")).toEqual({
provider: "vercel-ai-gateway",
model: "anthropic/claude-opus-4.6",
});
expect(parseModelRef("vercel-ai-gateway/opus-4.6", "openai")).toEqual({
provider: "vercel-ai-gateway",
model: "anthropic/claude-opus-4-6",
});
});
it("keeps already-prefixed Vercel Anthropic models unchanged", () => {
expect(parseModelRef("vercel-ai-gateway/anthropic/claude-opus-4.6", "openai")).toEqual({
provider: "vercel-ai-gateway",
model: "anthropic/claude-opus-4.6",
});
});
it("passes through non-Claude Vercel model ids unchanged", () => {
expect(parseModelRef("vercel-ai-gateway/openai/gpt-5.2", "openai")).toEqual({
provider: "vercel-ai-gateway",
model: "openai/gpt-5.2",
});
});
it("should handle invalid slash usage", () => {
expect(parseModelRef("/", "anthropic")).toBeNull();
expect(parseModelRef("anthropic/", "anthropic")).toBeNull();
expect(parseModelRef("/model", "anthropic")).toBeNull();
it.each(["", " ", "/", "anthropic/", "/model"])("returns null for invalid ref %j", (raw) => {
expect(parseModelRef(raw, "anthropic")).toBeNull();
});
});

View File

@ -113,6 +113,92 @@ function createMoonshotConfig(overrides: {
};
}
/**
 * Builds a config whose openai provider already carries a resolved plaintext
 * API key (simulating `${OPENAI_API_KEY}` expansion performed upstream).
 *
 * @param mergeMode when true, sets `models.mode: "merge"` so the provider is
 *   merged with previously generated models.json content.
 */
function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig {
  // Annotated local keeps contextual literal typing on the nested fields.
  const config: OpenClawConfig = {
    models: {
      ...(mergeMode ? { mode: "merge" as const } : {}),
      providers: {
        openai: {
          baseUrl: "https://api.openai.com/v1",
          apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
          api: "openai-completions",
          models: [
            {
              id: "gpt-4.1",
              name: "GPT-4.1",
              input: ["text"],
              reasoning: false,
              cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
              contextWindow: 128000,
              maxTokens: 16384,
            },
          ],
        },
      },
    },
  };
  return config;
}
/**
 * Verifies that the generated models.json stores the env-var marker
 * ("OPENAI_API_KEY") rather than the resolved plaintext key. When
 * `seedMergedProvider` is set, a stale merged provider entry is written first
 * so the test also covers replacement of a previously persisted key.
 */
async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) {
  const seedMergedProvider = options?.seedMergedProvider ?? false;
  await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
    await withTempHome(async () => {
      if (seedMergedProvider) {
        await writeAgentModelsJson({
          providers: {
            openai: {
              baseUrl: "https://api.openai.com/v1",
              apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
              api: "openai-completions",
              models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
            },
          },
        });
      }
      await ensureOpenClawModelsJson(createOpenAiConfigWithResolvedApiKey(seedMergedProvider));
      const generated = await readGeneratedModelsJson<{
        providers: Record<string, { apiKey?: string }>;
      }>();
      expect(generated.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
    });
  });
}
/**
 * Generates models.json from a moonshot config with the given token limits
 * and asserts that the kimi-k2.5 entry ends up with the expected
 * contextWindow/maxTokens values.
 */
async function expectMoonshotTokenLimits(params: {
  contextWindow: number;
  maxTokens: number;
  expectedContextWindow: number;
  expectedMaxTokens: number;
}) {
  await withTempHome(async () => {
    await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
      const { contextWindow, maxTokens } = params;
      await ensureOpenClawModelsJson(createMoonshotConfig({ contextWindow, maxTokens }));
      const generated = await readGeneratedModelsJson<{
        providers: Record<
          string,
          {
            models?: Array<{
              id: string;
              contextWindow?: number;
              maxTokens?: number;
            }>;
          }
        >;
      }>();
      const kimiEntry = generated.providers.moonshot?.models?.find(
        (model) => model.id === "kimi-k2.5",
      );
      expect(kimiEntry?.contextWindow).toBe(params.expectedContextWindow);
      expect(kimiEntry?.maxTokens).toBe(params.expectedMaxTokens);
    });
  });
}
describe("models-config", () => {
it("keeps anthropic api defaults when model entries omit api", async () => {
await withTempHome(async () => {
@ -444,131 +530,28 @@ describe("models-config", () => {
});
it("does not persist resolved env var value as plaintext in models.json", async () => {
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
await withTempHome(async () => {
const cfg: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig
api: "openai-completions",
models: [
{
id: "gpt-4.1",
name: "GPT-4.1",
input: ["text"],
reasoning: false,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 16384,
},
],
},
},
},
};
await ensureOpenClawModelsJson(cfg);
const result = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY");
});
});
await expectOpenAiEnvMarkerApiKey();
});
it("replaces stale merged apiKey when config key normalizes to a known env marker", async () => {
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
await withTempHome(async () => {
await writeAgentModelsJson({
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
api: "openai-completions",
models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
},
},
});
const cfg: OpenClawConfig = {
models: {
mode: "merge",
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
api: "openai-completions",
models: [
{
id: "gpt-4.1",
name: "GPT-4.1",
input: ["text"],
reasoning: false,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 16384,
},
],
},
},
},
};
await ensureOpenClawModelsJson(cfg);
const result = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
});
});
await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true });
});
it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => {
await withTempHome(async () => {
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 });
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<
string,
{
models?: Array<{
id: string;
contextWindow?: number;
maxTokens?: number;
}>;
}
>;
}>();
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
expect(kimi?.contextWindow).toBe(350000);
expect(kimi?.maxTokens).toBe(16384);
});
await expectMoonshotTokenLimits({
contextWindow: 350000,
maxTokens: 16384,
expectedContextWindow: 350000,
expectedMaxTokens: 16384,
});
});
it("falls back to implicit token limits when explicit values are invalid", async () => {
await withTempHome(async () => {
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 });
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<
string,
{
models?: Array<{
id: string;
contextWindow?: number;
maxTokens?: number;
}>;
}
>;
}>();
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
expect(kimi?.contextWindow).toBe(256000);
expect(kimi?.maxTokens).toBe(8192);
});
await expectMoonshotTokenLimits({
contextWindow: 0,
maxTokens: -1,
expectedContextWindow: 256000,
expectedMaxTokens: 8192,
});
});
});

View File

@ -1,91 +1,82 @@
import { describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import type { ModelDefinitionConfig } from "../config/types.models.js";
import { installModelsConfigTestHooks, withModelsTempHome } from "./models-config.e2e-harness.js";
import { ensureOpenClawModelsJson } from "./models-config.js";
import { readGeneratedModelsJson } from "./models-config.test-utils.js";
/** Wraps the given model definitions in a single google-provider config. */
function createGoogleModelsConfig(models: ModelDefinitionConfig[]): OpenClawConfig {
  // Annotated local keeps contextual literal typing on the provider fields.
  const config: OpenClawConfig = {
    models: {
      providers: {
        google: {
          baseUrl: "https://generativelanguage.googleapis.com/v1beta",
          apiKey: "GEMINI_KEY", // pragma: allowlist secret
          api: "google-generative-ai",
          models,
        },
      },
    },
  };
  return config;
}
/** Asserts the generated models.json lists exactly `ids` (in order) for the google provider. */
async function expectGeneratedGoogleModelIds(ids: string[]) {
  const generated = await readGeneratedModelsJson<{
    providers: Record<string, { models: Array<{ id: string }> }>;
  }>();
  const actualIds = generated.providers.google?.models?.map(({ id }) => id);
  expect(actualIds).toEqual(ids);
}
describe("models-config", () => {
installModelsConfigTestHooks();
it("normalizes gemini 3 ids to preview for google providers", async () => {
await withModelsTempHome(async () => {
const cfg: OpenClawConfig = {
models: {
providers: {
google: {
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
apiKey: "GEMINI_KEY", // pragma: allowlist secret
api: "google-generative-ai",
models: [
{
id: "gemini-3-pro",
name: "Gemini 3 Pro",
api: "google-generative-ai",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
{
id: "gemini-3-flash",
name: "Gemini 3 Flash",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
],
},
},
const cfg = createGoogleModelsConfig([
{
id: "gemini-3-pro",
name: "Gemini 3 Pro",
api: "google-generative-ai",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
};
{
id: "gemini-3-flash",
name: "Gemini 3 Flash",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
]);
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { models: Array<{ id: string }> }>;
}>();
const ids = parsed.providers.google?.models?.map((model) => model.id);
expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
await expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
});
});
it("normalizes the deprecated google flash preview id to the working preview id", async () => {
await withModelsTempHome(async () => {
const cfg: OpenClawConfig = {
models: {
providers: {
google: {
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
apiKey: "GEMINI_KEY", // pragma: allowlist secret
api: "google-generative-ai",
models: [
{
id: "gemini-3.1-flash-preview",
name: "Gemini 3.1 Flash Preview",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
],
},
},
const cfg = createGoogleModelsConfig([
{
id: "gemini-3.1-flash-preview",
name: "Gemini 3.1 Flash Preview",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
};
]);
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { models: Array<{ id: string }> }>;
}>();
const ids = parsed.providers.google?.models?.map((model) => model.id);
expect(ids).toEqual(["gemini-3-flash-preview"]);
await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]);
});
});
});

View File

@ -16,47 +16,137 @@ import { readGeneratedModelsJson } from "./models-config.test-utils.js";
installModelsConfigTestHooks();
/** Source-of-truth config whose OpenAI apiKey is an unresolved env secret reference. */
function createOpenAiApiKeySourceConfig(): OpenClawConfig {
  const openai = {
    baseUrl: "https://api.openai.com/v1",
    apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
    api: "openai-completions" as const,
    models: [],
  };
  return { models: { providers: { openai } } };
}
/** Runtime config where the OpenAI apiKey has already been resolved to a literal value. */
function createOpenAiApiKeyRuntimeConfig(): OpenClawConfig {
  const provider = {
    baseUrl: "https://api.openai.com/v1",
    apiKey: "sk-runtime-resolved", // pragma: allowlist secret
    api: "openai-completions" as const,
    models: [],
  };
  return { models: { providers: { openai: provider } } };
}
/**
 * Source config with no apiKey: auth flows through headers instead, one backed
 * by an env secret reference and one by a file (vault) reference.
 */
function createOpenAiHeaderSourceConfig(): OpenClawConfig {
  const headers = {
    Authorization: {
      source: "env",
      provider: "default",
      id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
    },
    "X-Tenant-Token": {
      source: "file",
      provider: "vault",
      id: "/providers/openai/tenantToken",
    },
  };
  return {
    models: {
      providers: {
        openai: {
          baseUrl: "https://api.openai.com/v1",
          api: "openai-completions" as const,
          headers,
          models: [],
        },
      },
    },
  };
}
/** Runtime counterpart of the header source config: both headers already resolved to literal tokens. */
function createOpenAiHeaderRuntimeConfig(): OpenClawConfig {
  const headers = {
    Authorization: "Bearer runtime-openai-token",
    "X-Tenant-Token": "runtime-tenant-token",
  };
  return {
    models: {
      providers: {
        openai: {
          baseUrl: "https://api.openai.com/v1",
          api: "openai-completions" as const,
          headers,
          models: [],
        },
      },
    },
  };
}
/** Returns a shallow copy of `config` whose gateway auth is forced to token mode. */
function withGatewayTokenMode(config: OpenClawConfig): OpenClawConfig {
  const gateway = { auth: { mode: "token" } };
  return { ...config, gateway };
}
/**
 * Runs `runAssertions` inside a temp HOME after generating models.json with the
 * given runtime/source snapshot installed. The snapshot and config cache are
 * always cleared afterwards, even when generation or the assertions throw.
 */
async function withGeneratedModelsFromRuntimeSource(
  params: {
    sourceConfig: OpenClawConfig;
    runtimeConfig: OpenClawConfig;
    candidateConfig?: OpenClawConfig;
  },
  runAssertions: () => Promise<void>,
) {
  const { sourceConfig, runtimeConfig, candidateConfig } = params;
  await withTempHome(async () => {
    try {
      setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
      // Default to the freshly loaded config unless a candidate was supplied.
      await ensureOpenClawModelsJson(candidateConfig ?? loadConfig());
      await runAssertions();
    } finally {
      clearRuntimeConfigSnapshot();
      clearConfigCache();
    }
  });
}
/** Asserts the generated models.json records `expected` as the provider's apiKey marker. */
async function expectGeneratedProviderApiKey(providerId: string, expected: string) {
  type Generated = { providers: Record<string, { apiKey?: string }> };
  const generated = await readGeneratedModelsJson<Generated>();
  const provider = generated.providers[providerId];
  expect(provider?.apiKey).toBe(expected);
}
/**
 * Asserts the generated OpenAI headers carry secret-reference markers rather
 * than resolved values: an env marker for Authorization and the generic
 * non-env marker for the file-backed tenant token.
 */
async function expectGeneratedOpenAiHeaderMarkers() {
  type Generated = { providers: Record<string, { headers?: Record<string, string> }> };
  const generated = await readGeneratedModelsJson<Generated>();
  const headers = generated.providers.openai?.headers;
  expect(headers?.Authorization).toBe(
    "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
  );
  expect(headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
}
describe("models-config runtime source snapshot", () => {
it("uses runtime source snapshot markers when passed the active runtime config", async () => {
await withTempHome(async () => {
const sourceConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
api: "openai-completions" as const,
models: [],
},
},
},
};
const runtimeConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
api: "openai-completions" as const,
models: [],
},
},
},
};
try {
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
await ensureOpenClawModelsJson(loadConfig());
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
} finally {
clearRuntimeConfigSnapshot();
clearConfigCache();
}
});
await withGeneratedModelsFromRuntimeSource(
{
sourceConfig: createOpenAiApiKeySourceConfig(),
runtimeConfig: createOpenAiApiKeyRuntimeConfig(),
},
async () => expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"), // pragma: allowlist secret
);
});
it("uses non-env marker from runtime source snapshot for file refs", async () => {
@ -103,30 +193,8 @@ describe("models-config runtime source snapshot", () => {
it("projects cloned runtime configs onto source snapshot when preserving provider auth", async () => {
await withTempHome(async () => {
const sourceConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
api: "openai-completions" as const,
models: [],
},
},
},
};
const runtimeConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
api: "openai-completions" as const,
models: [],
},
},
},
};
const sourceConfig = createOpenAiApiKeySourceConfig();
const runtimeConfig = createOpenAiApiKeyRuntimeConfig();
const clonedRuntimeConfig: OpenClawConfig = {
...runtimeConfig,
agents: {
@ -139,11 +207,7 @@ describe("models-config runtime source snapshot", () => {
try {
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
await ensureOpenClawModelsJson(clonedRuntimeConfig);
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret
} finally {
clearRuntimeConfigSnapshot();
clearConfigCache();
@ -152,121 +216,27 @@ describe("models-config runtime source snapshot", () => {
});
it("uses header markers from runtime source snapshot instead of resolved runtime values", async () => {
await withTempHome(async () => {
const sourceConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions" as const,
headers: {
Authorization: {
source: "env",
provider: "default",
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
},
"X-Tenant-Token": {
source: "file",
provider: "vault",
id: "/providers/openai/tenantToken",
},
},
models: [],
},
},
},
};
const runtimeConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions" as const,
headers: {
Authorization: "Bearer runtime-openai-token",
"X-Tenant-Token": "runtime-tenant-token",
},
models: [],
},
},
},
};
try {
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
await ensureOpenClawModelsJson(loadConfig());
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { headers?: Record<string, string> }>;
}>();
expect(parsed.providers.openai?.headers?.Authorization).toBe(
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
);
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
} finally {
clearRuntimeConfigSnapshot();
clearConfigCache();
}
});
await withGeneratedModelsFromRuntimeSource(
{
sourceConfig: createOpenAiHeaderSourceConfig(),
runtimeConfig: createOpenAiHeaderRuntimeConfig(),
},
expectGeneratedOpenAiHeaderMarkers,
);
});
it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => {
await withTempHome(async () => {
const sourceConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
api: "openai-completions" as const,
models: [],
},
},
},
gateway: {
auth: {
mode: "token",
},
},
};
const runtimeConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
api: "openai-completions" as const,
models: [],
},
},
},
gateway: {
auth: {
mode: "token",
},
},
};
const sourceConfig = withGatewayTokenMode(createOpenAiApiKeySourceConfig());
const runtimeConfig = withGatewayTokenMode(createOpenAiApiKeyRuntimeConfig());
const incompatibleCandidate: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
api: "openai-completions" as const,
models: [],
},
},
},
...createOpenAiApiKeyRuntimeConfig(),
};
try {
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
await ensureOpenClawModelsJson(incompatibleCandidate);
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret
} finally {
clearRuntimeConfigSnapshot();
clearConfigCache();
@ -276,81 +246,16 @@ describe("models-config runtime source snapshot", () => {
it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => {
await withTempHome(async () => {
const sourceConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions" as const,
headers: {
Authorization: {
source: "env",
provider: "default",
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
},
"X-Tenant-Token": {
source: "file",
provider: "vault",
id: "/providers/openai/tenantToken",
},
},
models: [],
},
},
},
gateway: {
auth: {
mode: "token",
},
},
};
const runtimeConfig: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions" as const,
headers: {
Authorization: "Bearer runtime-openai-token",
"X-Tenant-Token": "runtime-tenant-token",
},
models: [],
},
},
},
gateway: {
auth: {
mode: "token",
},
},
};
const sourceConfig = withGatewayTokenMode(createOpenAiHeaderSourceConfig());
const runtimeConfig = withGatewayTokenMode(createOpenAiHeaderRuntimeConfig());
const incompatibleCandidate: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions" as const,
headers: {
Authorization: "Bearer runtime-openai-token",
"X-Tenant-Token": "runtime-tenant-token",
},
models: [],
},
},
},
...createOpenAiHeaderRuntimeConfig(),
};
try {
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
await ensureOpenClawModelsJson(incompatibleCandidate);
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { headers?: Record<string, string> }>;
}>();
expect(parsed.providers.openai?.headers?.Authorization).toBe(
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
);
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
await expectGeneratedOpenAiHeaderMarkers();
} finally {
clearRuntimeConfigSnapshot();
clearConfigCache();

View File

@ -1,31 +1,11 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js";
import {
enrichOllamaModelsWithContext,
resolveOllamaApiBase,
type OllamaTagModel,
} from "./ollama-models.js";
/** Serializes `body` as a JSON HTTP Response (default status 200) for fetch mocks. */
function jsonResponse(body: unknown, status = 200): Response {
  const init: ResponseInit = {
    status,
    headers: { "Content-Type": "application/json" },
  };
  return new Response(JSON.stringify(body), init);
}
/** Normalizes any fetch input (string, URL, or Request) to its URL string. */
function requestUrl(input: string | URL | Request): string {
  if (typeof input === "string") {
    return input;
  }
  return input instanceof URL ? input.toString() : input.url;
}
/** Returns the body when it is already a string; any other BodyInit falls back to "{}". */
function requestBody(body: BodyInit | null | undefined): string {
  if (typeof body === "string") {
    return body;
  }
  return "{}";
}
describe("ollama-models", () => {
afterEach(() => {
vi.unstubAllGlobals();
@ -43,7 +23,7 @@ describe("ollama-models", () => {
if (!url.endsWith("/api/show")) {
throw new Error(`Unexpected fetch: ${url}`);
}
const body = JSON.parse(requestBody(init?.body)) as { name?: string };
const body = JSON.parse(requestBodyText(init?.body)) as { name?: string };
if (body.name === "llama3:8b") {
return jsonResponse({ model_info: { "llama.context_length": 65536 } });
}

View File

@ -106,7 +106,7 @@ describe("buildAssistantMessage", () => {
expect(result.usage.totalTokens).toBe(15);
});
it("falls back to thinking when content is empty", () => {
it("drops thinking-only output when content is empty", () => {
const response = {
model: "qwen3:32b",
created_at: "2026-01-01T00:00:00Z",
@ -119,10 +119,10 @@ describe("buildAssistantMessage", () => {
};
const result = buildAssistantMessage(response, modelInfo);
expect(result.stopReason).toBe("stop");
expect(result.content).toEqual([{ type: "text", text: "Thinking output" }]);
expect(result.content).toEqual([]);
});
it("falls back to reasoning when content and thinking are empty", () => {
it("drops reasoning-only output when content and thinking are empty", () => {
const response = {
model: "qwen3:32b",
created_at: "2026-01-01T00:00:00Z",
@ -135,7 +135,7 @@ describe("buildAssistantMessage", () => {
};
const result = buildAssistantMessage(response, modelInfo);
expect(result.stopReason).toBe("stop");
expect(result.content).toEqual([{ type: "text", text: "Reasoning output" }]);
expect(result.content).toEqual([]);
});
it("builds response with tool calls", () => {
@ -203,6 +203,20 @@ function mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader<Uint8Arr
} as unknown as ReadableStreamDefaultReader<Uint8Array>;
}
/**
 * Streams the given NDJSON lines through a mocked fetch and asserts the final
 * stream event is a "done" event carrying `expectedContent`.
 */
async function expectDoneEventContent(lines: string[], expectedContent: unknown) {
  const assertDone = async () => {
    const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
    const events = await collectStreamEvents(stream);
    const last = events.at(-1);
    // undefined !== "done", so a missing last event fails this guard too.
    if (last?.type !== "done") {
      throw new Error("Expected done event");
    }
    expect(last.message.content).toEqual(expectedContent);
  };
  await withMockNdjsonFetch(lines, assertDone);
}
describe("parseNdjsonStream", () => {
it("parses text-only streaming chunks", async () => {
const reader = mockNdjsonReader([
@ -485,89 +499,49 @@ describe("createOllamaStreamFn", () => {
);
});
it("accumulates thinking chunks when content is empty", async () => {
await withMockNdjsonFetch(
it("drops thinking chunks when no final content is emitted", async () => {
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]);
},
[],
);
});
it("prefers streamed content over earlier thinking chunks", async () => {
await withMockNdjsonFetch(
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
},
[{ type: "text", text: "final answer" }],
);
});
it("accumulates reasoning chunks when thinking is absent", async () => {
await withMockNdjsonFetch(
it("drops reasoning chunks when no final content is emitted", async () => {
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]);
},
[],
);
});
it("prefers streamed content over earlier reasoning chunks", async () => {
await withMockNdjsonFetch(
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
},
[{ type: "text", text: "final answer" }],
);
});
});

View File

@ -340,10 +340,9 @@ export function buildAssistantMessage(
): AssistantMessage {
const content: (TextContent | ToolCall)[] = [];
// Ollama-native reasoning models may emit their answer in `thinking` or
// `reasoning` with an empty `content`. Fall back so replies are not dropped.
const text =
response.message.content || response.message.thinking || response.message.reasoning || "";
// Native Ollama reasoning fields are internal model output. The reply text
// must come from `content`; reasoning visibility is controlled elsewhere.
const text = response.message.content || "";
if (text) {
content.push({ type: "text", text });
}
@ -497,20 +496,12 @@ export function createOllamaStreamFn(
const reader = response.body.getReader();
let accumulatedContent = "";
let fallbackContent = "";
let sawContent = false;
const accumulatedToolCalls: OllamaToolCall[] = [];
let finalResponse: OllamaChatResponse | undefined;
for await (const chunk of parseNdjsonStream(reader)) {
if (chunk.message?.content) {
sawContent = true;
accumulatedContent += chunk.message.content;
} else if (!sawContent && chunk.message?.thinking) {
fallbackContent += chunk.message.thinking;
} else if (!sawContent && chunk.message?.reasoning) {
// Backward compatibility for older/native variants that still use reasoning.
fallbackContent += chunk.message.reasoning;
}
// Ollama sends tool_calls in intermediate (done:false) chunks,
@ -529,7 +520,7 @@ export function createOllamaStreamFn(
throw new Error("Ollama API stream ended without a final response");
}
finalResponse.message.content = accumulatedContent || fallbackContent;
finalResponse.message.content = accumulatedContent;
if (accumulatedToolCalls.length > 0) {
finalResponse.message.tool_calls = accumulatedToolCalls;
}

View File

@ -115,6 +115,50 @@ function resetSessionStore(store: Record<string, unknown>) {
mockConfig = createMockConfig();
}
/**
 * Points the module-level mockConfig at a sandboxed-session setup where
 * session tools are globally visible but sandboxed agents only see sessions
 * they spawned.
 */
function installSandboxedSessionStatusConfig() {
  const defaults = {
    model: { primary: "anthropic/claude-opus-4-5" },
    models: {},
    sandbox: { sessionToolsVisibility: "spawned" },
  };
  mockConfig = {
    session: { mainKey: "main", scope: "per-sender" },
    tools: {
      sessions: { visibility: "all" },
      agentToAgent: { enabled: true, allow: ["*"] },
    },
    agents: { defaults },
  };
}
/**
 * Stubs the gateway so sessions.list requests are answered via
 * `resolveSessions` (keyed on the spawnedBy param); all other methods get {}.
 */
function mockSpawnedSessionList(
  resolveSessions: (spawnedBy: string | undefined) => Array<Record<string, unknown>>,
) {
  callGatewayMock.mockImplementation(async (opts: unknown) => {
    const { method, params } = opts as { method?: string; params?: Record<string, unknown> };
    if (method !== "sessions.list") {
      return {};
    }
    const spawnedBy = params?.spawnedBy as string | undefined;
    return { sessions: resolveSessions(spawnedBy) };
  });
}
/** Verifies the gateway was asked exactly twice, both times listing sessions spawned by `spawnedBy`. */
function expectSpawnedSessionLookupCalls(spawnedBy: string) {
  const lookup = {
    method: "sessions.list",
    params: {
      includeGlobal: false,
      includeUnknown: false,
      limit: 500,
      spawnedBy,
    },
  };
  expect(callGatewayMock).toHaveBeenCalledTimes(2);
  for (const nth of [1, 2]) {
    expect(callGatewayMock).toHaveBeenNthCalledWith(nth, lookup);
  }
}
function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) {
const tool = createOpenClawTools({
agentSessionKey,
@ -242,27 +286,8 @@ describe("session_status tool", () => {
updatedAt: 10,
},
});
mockConfig = {
session: { mainKey: "main", scope: "per-sender" },
tools: {
sessions: { visibility: "all" },
agentToAgent: { enabled: true, allow: ["*"] },
},
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {},
sandbox: { sessionToolsVisibility: "spawned" },
},
},
};
callGatewayMock.mockImplementation(async (opts: unknown) => {
const request = opts as { method?: string; params?: Record<string, unknown> };
if (request.method === "sessions.list") {
return { sessions: [] };
}
return {};
});
installSandboxedSessionStatusConfig();
mockSpawnedSessionList(() => []);
const tool = getSessionStatusTool("agent:main:subagent:child", {
sandboxed: true,
@ -284,25 +309,7 @@ describe("session_status tool", () => {
expect(loadSessionStoreMock).not.toHaveBeenCalled();
expect(updateSessionStoreMock).not.toHaveBeenCalled();
expect(callGatewayMock).toHaveBeenCalledTimes(2);
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "agent:main:subagent:child",
},
});
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "agent:main:subagent:child",
},
});
expectSpawnedSessionLookupCalls("agent:main:subagent:child");
});
it("keeps legacy main requester keys for sandboxed session tree checks", async () => {
@ -316,30 +323,10 @@ describe("session_status tool", () => {
updatedAt: 20,
},
});
mockConfig = {
session: { mainKey: "main", scope: "per-sender" },
tools: {
sessions: { visibility: "all" },
agentToAgent: { enabled: true, allow: ["*"] },
},
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {},
sandbox: { sessionToolsVisibility: "spawned" },
},
},
};
callGatewayMock.mockImplementation(async (opts: unknown) => {
const request = opts as { method?: string; params?: Record<string, unknown> };
if (request.method === "sessions.list") {
return {
sessions:
request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
};
}
return {};
});
installSandboxedSessionStatusConfig();
mockSpawnedSessionList((spawnedBy) =>
spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
);
const tool = getSessionStatusTool("main", {
sandboxed: true,
@ -357,25 +344,7 @@ describe("session_status tool", () => {
expect(childDetails.ok).toBe(true);
expect(childDetails.sessionKey).toBe("agent:main:subagent:child");
expect(callGatewayMock).toHaveBeenCalledTimes(2);
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "main",
},
});
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "main",
},
});
expectSpawnedSessionLookupCalls("main");
});
it("scopes bare session keys to the requester agent", async () => {

View File

@ -17,6 +17,63 @@ function writeStore(storePath: string, store: Record<string, unknown>) {
fs.writeFileSync(storePath, JSON.stringify(store, null, 2), "utf-8");
}
/**
 * Seeds the session store with a leaf subagent plus a child session it
 * spawned, registers an in-flight run for that child, and returns the child
 * key together with a subagents tool scoped to the leaf.
 */
function seedLeafOwnedChildSession(storePath: string, leafKey = "agent:main:subagent:leaf") {
  const childKey = `${leafKey}:subagent:child`;
  const leafEntry = {
    sessionId: "leaf-session",
    updatedAt: Date.now(),
    spawnedBy: "agent:main:main",
    subagentRole: "leaf",
    subagentControlScope: "none",
  };
  const childEntry = {
    sessionId: "child-session",
    updatedAt: Date.now(),
    spawnedBy: leafKey,
    subagentRole: "leaf",
    subagentControlScope: "none",
  };
  writeStore(storePath, { [leafKey]: leafEntry, [childKey]: childEntry });
  addSubagentRunForTests({
    runId: "run-child",
    childSessionKey: childKey,
    controllerSessionKey: leafKey,
    requesterSessionKey: leafKey,
    requesterDisplayKey: leafKey,
    task: "impossible child",
    cleanup: "keep",
    // Backdate the run so it reads as already started.
    createdAt: Date.now() - 30_000,
    startedAt: Date.now() - 30_000,
  });
  return {
    childKey,
    tool: createSubagentsTool({ agentSessionKey: leafKey }),
  };
}
/**
 * Executes a kill/steer attempt from a leaf subagent against its own child and
 * asserts the action is rejected as forbidden without ever hitting the gateway.
 */
async function expectLeafSubagentControlForbidden(params: {
  storePath: string;
  action: "kill" | "steer";
  callId: string;
  message?: string;
}) {
  const { storePath, action, callId, message } = params;
  const { childKey, tool } = seedLeafOwnedChildSession(storePath);
  const result = await tool.execute(callId, {
    action,
    target: childKey,
    // Only steer attempts carry a message payload.
    ...(message ? { message } : {}),
  });
  expect(result.details).toMatchObject({
    status: "forbidden",
    error: "Leaf subagents cannot control other sessions.",
  });
  expect(callGatewayMock).not.toHaveBeenCalled();
}
describe("openclaw-tools: subagents scope isolation", () => {
let storePath = "";
@ -151,95 +208,19 @@ describe("openclaw-tools: subagents scope isolation", () => {
});
it("leaf subagents cannot kill even explicitly-owned child sessions", async () => {
const leafKey = "agent:main:subagent:leaf";
const childKey = `${leafKey}:subagent:child`;
writeStore(storePath, {
[leafKey]: {
sessionId: "leaf-session",
updatedAt: Date.now(),
spawnedBy: "agent:main:main",
subagentRole: "leaf",
subagentControlScope: "none",
},
[childKey]: {
sessionId: "child-session",
updatedAt: Date.now(),
spawnedBy: leafKey,
subagentRole: "leaf",
subagentControlScope: "none",
},
});
addSubagentRunForTests({
runId: "run-child",
childSessionKey: childKey,
controllerSessionKey: leafKey,
requesterSessionKey: leafKey,
requesterDisplayKey: leafKey,
task: "impossible child",
cleanup: "keep",
createdAt: Date.now() - 30_000,
startedAt: Date.now() - 30_000,
});
const tool = createSubagentsTool({ agentSessionKey: leafKey });
const result = await tool.execute("call-leaf-kill", {
await expectLeafSubagentControlForbidden({
storePath,
action: "kill",
target: childKey,
callId: "call-leaf-kill",
});
expect(result.details).toMatchObject({
status: "forbidden",
error: "Leaf subagents cannot control other sessions.",
});
expect(callGatewayMock).not.toHaveBeenCalled();
});
it("leaf subagents cannot steer even explicitly-owned child sessions", async () => {
const leafKey = "agent:main:subagent:leaf";
const childKey = `${leafKey}:subagent:child`;
writeStore(storePath, {
[leafKey]: {
sessionId: "leaf-session",
updatedAt: Date.now(),
spawnedBy: "agent:main:main",
subagentRole: "leaf",
subagentControlScope: "none",
},
[childKey]: {
sessionId: "child-session",
updatedAt: Date.now(),
spawnedBy: leafKey,
subagentRole: "leaf",
subagentControlScope: "none",
},
});
addSubagentRunForTests({
runId: "run-child",
childSessionKey: childKey,
controllerSessionKey: leafKey,
requesterSessionKey: leafKey,
requesterDisplayKey: leafKey,
task: "impossible child",
cleanup: "keep",
createdAt: Date.now() - 30_000,
startedAt: Date.now() - 30_000,
});
const tool = createSubagentsTool({ agentSessionKey: leafKey });
const result = await tool.execute("call-leaf-steer", {
await expectLeafSubagentControlForbidden({
storePath,
action: "steer",
target: childKey,
callId: "call-leaf-steer",
message: "continue",
});
expect(result.details).toMatchObject({
status: "forbidden",
error: "Leaf subagents cannot control other sessions.",
});
expect(callGatewayMock).not.toHaveBeenCalled();
});
});

View File

@ -174,15 +174,18 @@ export function createOpenClawTools(
createSessionsListTool({
agentSessionKey: options?.agentSessionKey,
sandboxed: options?.sandboxed,
config: options?.config,
}),
createSessionsHistoryTool({
agentSessionKey: options?.agentSessionKey,
sandboxed: options?.sandboxed,
config: options?.config,
}),
createSessionsSendTool({
agentSessionKey: options?.agentSessionKey,
agentChannel: options?.agentChannel,
sandboxed: options?.sandboxed,
config: options?.config,
}),
createSessionsYieldTool({
sessionId: options?.sessionId,

View File

@ -45,98 +45,117 @@ const GROQ_TOO_MANY_REQUESTS_MESSAGE =
const GROQ_SERVICE_UNAVAILABLE_MESSAGE =
"503 Service Unavailable: The server is temporarily unable to handle the request due to overloading or maintenance."; // pragma: allowlist secret
/** Asserts `matcher` classifies every sample as `expected`, labeling any failure with the sample text. */
function expectMessageMatches(
  matcher: (message: string) => boolean,
  samples: readonly string[],
  expected: boolean,
) {
  samples.forEach((sample) => {
    expect(matcher(sample), sample).toBe(expected);
  });
}
describe("isAuthPermanentErrorMessage", () => {
it("matches permanent auth failure patterns", () => {
const samples = [
"invalid_api_key",
"api key revoked",
"api key deactivated",
"key has been disabled",
"key has been revoked",
"account has been deactivated",
"could not authenticate api key",
"could not validate credentials",
"API_KEY_REVOKED",
"api_key_deleted",
];
for (const sample of samples) {
expect(isAuthPermanentErrorMessage(sample)).toBe(true);
}
});
it("does not match transient auth errors", () => {
const samples = [
"unauthorized",
"invalid token",
"authentication failed",
"forbidden",
"access denied",
"token has expired",
];
for (const sample of samples) {
expect(isAuthPermanentErrorMessage(sample)).toBe(false);
}
it.each([
{
name: "matches permanent auth failure patterns",
samples: [
"invalid_api_key",
"api key revoked",
"api key deactivated",
"key has been disabled",
"key has been revoked",
"account has been deactivated",
"could not authenticate api key",
"could not validate credentials",
"API_KEY_REVOKED",
"api_key_deleted",
],
expected: true,
},
{
name: "does not match transient auth errors",
samples: [
"unauthorized",
"invalid token",
"authentication failed",
"forbidden",
"access denied",
"token has expired",
],
expected: false,
},
])("$name", ({ samples, expected }) => {
expectMessageMatches(isAuthPermanentErrorMessage, samples, expected);
});
});
describe("isAuthErrorMessage", () => {
it("matches credential validation errors", () => {
const samples = [
'No credentials found for profile "anthropic:default".',
"No API key found for profile openai.",
];
for (const sample of samples) {
expect(isAuthErrorMessage(sample)).toBe(true);
}
});
it("matches OAuth refresh failures", () => {
const samples = [
"OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. Please try again or re-authenticate.",
"Please re-authenticate to continue.",
];
for (const sample of samples) {
expect(isAuthErrorMessage(sample)).toBe(true);
}
it.each([
'No credentials found for profile "anthropic:default".',
"No API key found for profile openai.",
"OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. Please try again or re-authenticate.",
"Please re-authenticate to continue.",
])("matches auth errors for %j", (sample) => {
expect(isAuthErrorMessage(sample)).toBe(true);
});
});
describe("isBillingErrorMessage", () => {
it("matches credit / payment failures", () => {
const samples = [
"Your credit balance is too low to access the Anthropic API.",
"insufficient credits",
"Payment Required",
"HTTP 402 Payment Required",
"plans & billing",
// Venice returns "Insufficient USD or Diem balance" which has extra words
// between "insufficient" and "balance"
"Insufficient USD or Diem balance to complete request. Visit https://venice.ai/settings/api to add credits.",
// OpenRouter returns "requires more credits" for underfunded accounts
"This model requires more credits to use",
"This endpoint require more credits",
];
for (const sample of samples) {
expect(isBillingErrorMessage(sample)).toBe(true);
}
});
it("does not false-positive on issue IDs or text containing 402", () => {
const falsePositives = [
"Fixed issue CHE-402 in the latest release",
"See ticket #402 for details",
"ISSUE-402 has been resolved",
"Room 402 is available",
"Error code 403 was returned, not 402-related",
"The building at 402 Main Street",
"processed 402 records",
"402 items found in the database",
"port 402 is open",
"Use a 402 stainless bolt",
"Book a 402 room",
"There is a 402 near me",
];
for (const sample of falsePositives) {
expect(isBillingErrorMessage(sample)).toBe(false);
}
it.each([
{
name: "matches credit and payment failures",
samples: [
"Your credit balance is too low to access the Anthropic API.",
"insufficient credits",
"Payment Required",
"HTTP 402 Payment Required",
"plans & billing",
"Insufficient USD or Diem balance to complete request. Visit https://venice.ai/settings/api to add credits.",
"This model requires more credits to use",
"This endpoint require more credits",
],
expected: true,
},
{
name: "does not false-positive on issue ids and numeric references",
samples: [
"Fixed issue CHE-402 in the latest release",
"See ticket #402 for details",
"ISSUE-402 has been resolved",
"Room 402 is available",
"Error code 403 was returned, not 402-related",
"The building at 402 Main Street",
"processed 402 records",
"402 items found in the database",
"port 402 is open",
"Use a 402 stainless bolt",
"Book a 402 room",
"There is a 402 near me",
],
expected: false,
},
{
name: "still matches real HTTP 402 billing errors",
samples: [
"HTTP 402 Payment Required",
"status: 402",
"error code 402",
"http 402",
"status=402 payment required",
"got a 402 from the API",
"returned 402",
"received a 402 response",
'{"status":402,"type":"error"}',
'{"code":402,"message":"payment required"}',
'{"error":{"code":402,"message":"billing hard limit reached"}}',
],
expected: true,
},
])("$name", ({ samples, expected }) => {
expectMessageMatches(isBillingErrorMessage, samples, expected);
});
it("does not false-positive on long assistant responses mentioning billing keywords", () => {
// Simulate a multi-paragraph assistant response that mentions billing terms
const longResponse =
@ -176,37 +195,27 @@ describe("isBillingErrorMessage", () => {
expect(longNonError.length).toBeGreaterThan(512);
expect(isBillingErrorMessage(longNonError)).toBe(false);
});
it("still matches real HTTP 402 billing errors", () => {
const realErrors = [
"HTTP 402 Payment Required",
"status: 402",
"error code 402",
"http 402",
"status=402 payment required",
"got a 402 from the API",
"returned 402",
"received a 402 response",
'{"status":402,"type":"error"}',
'{"code":402,"message":"payment required"}',
'{"error":{"code":402,"message":"billing hard limit reached"}}',
];
for (const sample of realErrors) {
expect(isBillingErrorMessage(sample)).toBe(true);
}
it("prefers billing when API-key and 402 hints both appear", () => {
const sample =
"402 Payment Required: The account associated with this API key has reached its maximum allowed monthly spending limit.";
expect(isBillingErrorMessage(sample)).toBe(true);
expect(classifyFailoverReason(sample)).toBe("billing");
});
});
describe("isCloudCodeAssistFormatError", () => {
it("matches format errors", () => {
const samples = [
"INVALID_REQUEST_ERROR: string should match pattern",
"messages.1.content.1.tool_use.id",
"tool_use.id should match pattern",
"invalid request format",
];
for (const sample of samples) {
expect(isCloudCodeAssistFormatError(sample)).toBe(true);
}
expectMessageMatches(
isCloudCodeAssistFormatError,
[
"INVALID_REQUEST_ERROR: string should match pattern",
"messages.1.content.1.tool_use.id",
"tool_use.id should match pattern",
"invalid request format",
],
true,
);
});
});
@ -238,20 +247,24 @@ describe("isCloudflareOrHtmlErrorPage", () => {
});
describe("isCompactionFailureError", () => {
it("matches compaction overflow failures", () => {
const samples = [
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
"auto-compaction failed due to context overflow",
"Compaction failed: prompt is too long",
"Summarization failed: context window exceeded for this request",
];
for (const sample of samples) {
expect(isCompactionFailureError(sample)).toBe(true);
}
});
it("ignores non-compaction overflow errors", () => {
expect(isCompactionFailureError("Context overflow: prompt too large")).toBe(false);
expect(isCompactionFailureError("rate limit exceeded")).toBe(false);
it.each([
{
name: "matches compaction overflow failures",
samples: [
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
"auto-compaction failed due to context overflow",
"Compaction failed: prompt is too long",
"Summarization failed: context window exceeded for this request",
],
expected: true,
},
{
name: "ignores non-compaction overflow errors",
samples: ["Context overflow: prompt too large", "rate limit exceeded"],
expected: false,
},
])("$name", ({ samples, expected }) => {
expectMessageMatches(isCompactionFailureError, samples, expected);
});
});
@ -506,6 +519,10 @@ describe("isTransientHttpError", () => {
});
describe("classifyFailoverReasonFromHttpStatus", () => {
it("treats HTTP 401 permanent auth failures as auth_permanent", () => {
expect(classifyFailoverReasonFromHttpStatus(401, "invalid_api_key")).toBe("auth_permanent");
});
it("treats HTTP 422 as format error", () => {
expect(classifyFailoverReasonFromHttpStatus(422)).toBe("format");
expect(classifyFailoverReasonFromHttpStatus(422, "check open ai req parameter error")).toBe(
@ -518,6 +535,10 @@ describe("classifyFailoverReasonFromHttpStatus", () => {
expect(classifyFailoverReasonFromHttpStatus(422, "insufficient credits")).toBe("billing");
});
it("treats HTTP 400 insufficient-quota payloads as billing instead of format", () => {
expect(classifyFailoverReasonFromHttpStatus(400, INSUFFICIENT_QUOTA_PAYLOAD)).toBe("billing");
});
it("treats HTTP 499 as transient for structured errors", () => {
expect(classifyFailoverReasonFromHttpStatus(499)).toBe("timeout");
expect(classifyFailoverReasonFromHttpStatus(499, "499 Client Closed Request")).toBe("timeout");

View File

@ -1,9 +1,14 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import "./test-helpers/fast-coding-tools.js";
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import {
cleanupEmbeddedPiRunnerTestWorkspace,
createEmbeddedPiRunnerOpenAiConfig,
createEmbeddedPiRunnerTestWorkspace,
type EmbeddedPiRunnerTestWorkspace,
immediateEnqueue,
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
function createMockUsage(input: number, output: number) {
return {
@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager;
let tempRoot: string | undefined;
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
let agentDir: string;
let workspaceDir: string;
let sessionCounter = 0;
@ -98,50 +103,21 @@ beforeAll(async () => {
vi.useRealTimers();
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
({ SessionManager } = await import("@mariozechner/pi-coding-agent"));
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-"));
agentDir = path.join(tempRoot, "agent");
workspaceDir = path.join(tempRoot, "workspace");
await fs.mkdir(agentDir, { recursive: true });
await fs.mkdir(workspaceDir, { recursive: true });
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-");
({ agentDir, workspaceDir } = e2eWorkspace);
}, 180_000);
afterAll(async () => {
if (!tempRoot) {
return;
}
await fs.rm(tempRoot, { recursive: true, force: true });
tempRoot = undefined;
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
e2eWorkspace = undefined;
});
const makeOpenAiConfig = (modelIds: string[]) =>
({
models: {
providers: {
openai: {
api: "openai-responses",
apiKey: "sk-test",
baseUrl: "https://example.com",
models: modelIds.map((id) => ({
id,
name: `Mock ${id}`,
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 16_000,
maxTokens: 2048,
})),
},
},
},
}) satisfies OpenClawConfig;
const nextSessionFile = () => {
sessionCounter += 1;
return path.join(workspaceDir, `session-${sessionCounter}.jsonl`);
};
const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`;
const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`;
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => {
const sessionFile = nextSessionFile();
@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string
timestamp: Date.now(),
});
const cfg = makeOpenAiConfig(["mock-1"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]);
return await runEmbeddedPiAgent({
sessionId: "session:test",
sessionKey,
@ -197,7 +173,7 @@ const readSessionMessages = async (sessionFile: string) => {
};
const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => {
const cfg = makeOpenAiConfig(["mock-error"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
await runEmbeddedPiAgent({
sessionId: "session:test",
sessionKey,
@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi
describe("runEmbeddedPiAgent", () => {
it("handles prompt error paths without dropping user state", async () => {
const sessionFile = nextSessionFile();
const cfg = makeOpenAiConfig(["mock-error"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
const sessionKey = nextSessionKey();
const result = await runEmbeddedPiAgent({
sessionId: "session:test",

View File

@ -8,12 +8,17 @@
* Follows the same pattern as pi-embedded-runner.e2e.test.ts.
*/
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import "./test-helpers/fast-coding-tools.js";
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js";
import {
cleanupEmbeddedPiRunnerTestWorkspace,
createEmbeddedPiRunnerOpenAiConfig,
createEmbeddedPiRunnerTestWorkspace,
type EmbeddedPiRunnerTestWorkspace,
immediateEnqueue,
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
function createMockUsage(input: number, output: number) {
return {
@ -126,7 +131,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
});
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
let tempRoot: string | undefined;
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
let agentDir: string;
let workspaceDir: string;
@ -136,45 +141,15 @@ beforeAll(async () => {
responsePlan = [];
observedContexts = [];
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-yield-e2e-"));
agentDir = path.join(tempRoot, "agent");
workspaceDir = path.join(tempRoot, "workspace");
await fs.mkdir(agentDir, { recursive: true });
await fs.mkdir(workspaceDir, { recursive: true });
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-");
({ agentDir, workspaceDir } = e2eWorkspace);
}, 180_000);
afterAll(async () => {
if (!tempRoot) {
return;
}
await fs.rm(tempRoot, { recursive: true, force: true });
tempRoot = undefined;
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
e2eWorkspace = undefined;
});
const makeConfig = (modelIds: string[]) =>
({
models: {
providers: {
openai: {
api: "openai-responses",
apiKey: "sk-test",
baseUrl: "https://example.com",
models: modelIds.map((id) => ({
id,
name: `Mock ${id}`,
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 16_000,
maxTokens: 2048,
})),
},
},
},
}) satisfies OpenClawConfig;
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
const readSessionMessages = async (sessionFile: string) => {
const raw = await fs.readFile(sessionFile, "utf-8");
return raw
@ -205,7 +180,7 @@ describe("sessions_yield e2e", () => {
const sessionId = "yield-e2e-parent";
const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl");
const cfg = makeConfig(["mock-yield"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]);
const result = await runEmbeddedPiAgent({
sessionId,
@ -304,7 +279,7 @@ describe("sessions_yield e2e", () => {
const sessionId = "yield-e2e-abort";
const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl");
const cfg = makeConfig(["mock-yield-abort"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]);
const result = await runEmbeddedPiAgent({
sessionId,

View File

@ -7,6 +7,7 @@ import {
usesOpenAiStringModeAnthropicToolChoice,
} from "../provider-capabilities.js";
import { log } from "./logger.js";
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07";
const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const;
@ -341,18 +342,10 @@ export function createAnthropicFastModeWrapper(
return underlying(model, context, options);
}
const originalOnPayload = options?.onPayload;
return underlying(model, context, {
...options,
onPayload: (payload) => {
if (payload && typeof payload === "object") {
const payloadObj = payload as Record<string, unknown>;
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
}
return originalOnPayload?.(payload, model);
},
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
});
};
}

View File

@ -278,6 +278,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({
}));
vi.mock("../../utils/message-channel.js", () => ({
INTERNAL_MESSAGE_CHANNEL: "webchat",
normalizeMessageChannel: vi.fn(() => undefined),
}));
@ -375,6 +376,16 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
unregisterApiProviders(getCustomApiRegistrySourceId("ollama"));
});
async function runDirectCompaction(customInstructions = "focus on decisions") {
return await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
sessionKey: "agent:main:session-1",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
customInstructions,
});
}
it("bootstraps runtime plugins with the resolved workspace", async () => {
await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
@ -472,13 +483,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
hookRunner.hasHooks.mockReturnValue(true);
sanitizeSessionHistoryMock.mockResolvedValue([]);
const result = await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
sessionKey: "agent:main:session-1",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
customInstructions: "focus on decisions",
});
const result = await runDirectCompaction();
expect(result.ok).toBe(true);
const beforeContext = sessionHook("compact:before")?.context;
@ -528,13 +533,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
details: { ok: true },
});
const result = await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
sessionKey: "agent:main:session-1",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
customInstructions: "focus on decisions",
});
const result = await runDirectCompaction();
expect(result).toMatchObject({
ok: true,

View File

@ -0,0 +1,44 @@
import { describe, expect, it } from "vitest";
import { CommandLane } from "../../process/lanes.js";
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
describe("resolveGlobalLane", () => {
it("defaults to main lane when no lane is provided", () => {
expect(resolveGlobalLane()).toBe(CommandLane.Main);
expect(resolveGlobalLane("")).toBe(CommandLane.Main);
expect(resolveGlobalLane(" ")).toBe(CommandLane.Main);
});
it("maps cron lane to nested lane to prevent deadlocks", () => {
// When cron jobs trigger nested agent runs, the outer execution holds
// the cron lane slot. Inner work must use a separate lane to avoid
// deadlock. See: https://github.com/openclaw/openclaw/issues/44805
expect(resolveGlobalLane("cron")).toBe(CommandLane.Nested);
expect(resolveGlobalLane(" cron ")).toBe(CommandLane.Nested);
});
it("preserves other lanes as-is", () => {
expect(resolveGlobalLane("main")).toBe(CommandLane.Main);
expect(resolveGlobalLane("subagent")).toBe(CommandLane.Subagent);
expect(resolveGlobalLane("nested")).toBe(CommandLane.Nested);
expect(resolveGlobalLane("custom-lane")).toBe("custom-lane");
expect(resolveGlobalLane(" custom ")).toBe("custom");
});
});
describe("resolveSessionLane", () => {
it("defaults to main lane and prefixes with session:", () => {
expect(resolveSessionLane("")).toBe("session:main");
expect(resolveSessionLane(" ")).toBe("session:main");
});
it("adds session: prefix if not present", () => {
expect(resolveSessionLane("abc123")).toBe("session:abc123");
expect(resolveSessionLane(" xyz ")).toBe("session:xyz");
});
it("preserves existing session: prefix", () => {
expect(resolveSessionLane("session:abc")).toBe("session:abc");
expect(resolveSessionLane("session:main")).toBe("session:main");
});
});

View File

@ -7,6 +7,10 @@ export function resolveSessionLane(key: string) {
/**
 * Resolves the global command lane for an operation.
 *
 * Blank or missing lanes fall back to the main lane. The cron lane is
 * remapped to the nested lane because cron-triggered work already holds
 * the cron lane slot; letting inner operations reuse it would deadlock.
 */
export function resolveGlobalLane(lane?: string) {
  const normalized = lane?.trim() ?? "";
  // Cron jobs hold the cron lane slot; inner operations must use nested to avoid deadlock.
  if (normalized === CommandLane.Cron) {
    return CommandLane.Nested;
  }
  return normalized === "" ? CommandLane.Main : normalized;
}

View File

@ -2,6 +2,7 @@ import type { StreamFn } from "@mariozechner/pi-agent-core";
import type { SimpleStreamOptions } from "@mariozechner/pi-ai";
import { streamSimple } from "@mariozechner/pi-ai";
import { log } from "./logger.js";
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
type OpenAIReasoningEffort = "low" | "medium" | "high";
@ -325,18 +326,10 @@ export function createOpenAIServiceTierWrapper(
) {
return underlying(model, context, options);
}
const originalOnPayload = options?.onPayload;
return underlying(model, context, {
...options,
onPayload: (payload) => {
if (payload && typeof payload === "object") {
const payloadObj = payload as Record<string, unknown>;
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
}
return originalOnPayload?.(payload, model);
},
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
});
};
}

View File

@ -249,6 +249,72 @@ function createSubscriptionMock() {
};
}
/**
 * Resets the shared `hoisted` mocks to a clean baseline before each
 * runEmbeddedAttempt test.
 *
 * Resets (and re-primes where a default is needed) the session-manager,
 * sandbox-context, and write-lock mocks so state from a previous test
 * cannot leak into the next one.
 *
 * @param params.includeSpawnSubagent - when true, also resets the
 *   sessions_spawn mock and primes it with an "accepted" child-spawn result.
 * @param params.subscribeImpl - optional replacement implementation for the
 *   embedded-session subscription mock; only reset when provided.
 * @param params.sessionMessages - seed messages returned by
 *   `buildSessionContext` (defaults to an empty history).
 */
function resetEmbeddedAttemptHarness(
  params: {
    includeSpawnSubagent?: boolean;
    subscribeImpl?: () => ReturnType<typeof createSubscriptionMock>;
    sessionMessages?: AgentMessage[];
  } = {},
) {
  if (params.includeSpawnSubagent) {
    // Default to a successful spawn so tests only override failure cases.
    hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({
      status: "accepted",
      childSessionKey: "agent:main:subagent:child",
      runId: "run-child",
    });
  }
  hoisted.createAgentSessionMock.mockReset();
  // Session-manager factory always hands back the shared mock manager.
  hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
  hoisted.resolveSandboxContextMock.mockReset();
  // Write lock resolves immediately with a no-op release.
  hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
    release: async () => {},
  });
  hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
  hoisted.sessionManager.branch.mockReset();
  hoisted.sessionManager.resetLeaf.mockReset();
  hoisted.sessionManager.buildSessionContext
    .mockReset()
    .mockReturnValue({ messages: params.sessionMessages ?? [] });
  hoisted.sessionManager.appendCustomEntry.mockReset();
  if (params.subscribeImpl) {
    // Only touch the subscription mock when a test supplies its own behavior.
    hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(params.subscribeImpl);
  }
}
/**
 * Removes every path queued in `tempPaths`, draining the array in place so
 * a repeated call is a no-op. Falsy entries are skipped; deletions use
 * `force` so missing paths do not throw.
 */
async function cleanupTempPaths(tempPaths: string[]) {
  for (;;) {
    const target = tempPaths.pop();
    if (target === undefined) {
      return;
    }
    if (target) {
      await fs.rm(target, { recursive: true, force: true });
    }
  }
}
/**
 * Builds a minimal in-memory stand-in for an embedded pi session.
 *
 * `prompt()` appends a canned assistant reply to the message list,
 * `agent.replaceMessages` swaps the list wholesale, and the remaining
 * lifecycle hooks (abort/dispose/steer) are no-ops.
 */
function createDefaultEmbeddedSession(): MutableSession {
  const session: MutableSession = {
    sessionId: "embedded-session",
    messages: [],
    isCompacting: false,
    isStreaming: false,
    agent: {
      replaceMessages: (next: unknown[]) => {
        // Copy so later external mutation of `next` cannot leak in.
        session.messages = next.slice();
      },
    },
    prompt: async () => {
      session.messages = session.messages.concat([
        { role: "assistant", content: "done", timestamp: 2 },
      ]);
    },
    abort: async () => {},
    dispose: () => {},
    steer: async () => {},
  };
  return session;
}
const testModel = {
api: "openai-completions",
provider: "openai",
@ -269,32 +335,14 @@ describe("runEmbeddedAttempt sessions_spawn workspace inheritance", () => {
const tempPaths: string[] = [];
beforeEach(() => {
hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({
status: "accepted",
childSessionKey: "agent:main:subagent:child",
runId: "run-child",
resetEmbeddedAttemptHarness({
includeSpawnSubagent: true,
subscribeImpl: createSubscriptionMock,
});
hoisted.createAgentSessionMock.mockReset();
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
hoisted.resolveSandboxContextMock.mockReset();
hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(createSubscriptionMock);
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
release: async () => {},
});
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
hoisted.sessionManager.branch.mockReset();
hoisted.sessionManager.resetLeaf.mockReset();
hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] });
hoisted.sessionManager.appendCustomEntry.mockReset();
});
afterEach(async () => {
while (tempPaths.length > 0) {
const target = tempPaths.pop();
if (target) {
await fs.rm(target, { recursive: true, force: true });
}
}
await cleanupTempPaths(tempPaths);
});
it("passes the real workspace to sessions_spawn when workspaceAccess is ro", async () => {
@ -394,26 +442,11 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => {
const tempPaths: string[] = [];
beforeEach(() => {
hoisted.createAgentSessionMock.mockReset();
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
hoisted.resolveSandboxContextMock.mockReset();
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
release: async () => {},
});
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
hoisted.sessionManager.branch.mockReset();
hoisted.sessionManager.resetLeaf.mockReset();
hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] });
hoisted.sessionManager.appendCustomEntry.mockReset();
resetEmbeddedAttemptHarness();
});
afterEach(async () => {
while (tempPaths.length > 0) {
const target = tempPaths.pop();
if (target) {
await fs.rm(target, { recursive: true, force: true });
}
}
await cleanupTempPaths(tempPaths);
});
async function runAttemptWithCacheTtl(compactionCount: number) {
@ -428,30 +461,9 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => {
getCompactionCount: () => compactionCount,
}));
hoisted.createAgentSessionMock.mockImplementation(async () => {
const session: MutableSession = {
sessionId: "embedded-session",
messages: [],
isCompacting: false,
isStreaming: false,
agent: {
replaceMessages: (messages: unknown[]) => {
session.messages = [...messages];
},
},
prompt: async () => {
session.messages = [
...session.messages,
{ role: "assistant", content: "done", timestamp: 2 },
];
},
abort: async () => {},
dispose: () => {},
steer: async () => {},
};
return { session };
});
hoisted.createAgentSessionMock.mockImplementation(async () => ({
session: createDefaultEmbeddedSession(),
}));
return await runEmbeddedAttempt({
sessionId: "embedded-session",
@ -591,30 +603,9 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => {
.mockReset()
.mockReturnValue({ messages: seedMessages });
hoisted.createAgentSessionMock.mockImplementation(async () => {
const session: MutableSession = {
sessionId: "embedded-session",
messages: [],
isCompacting: false,
isStreaming: false,
agent: {
replaceMessages: (messages: unknown[]) => {
session.messages = [...messages];
},
},
prompt: async () => {
session.messages = [
...session.messages,
{ role: "assistant", content: "done", timestamp: 2 },
];
},
abort: async () => {},
dispose: () => {},
steer: async () => {},
};
return { session };
});
hoisted.createAgentSessionMock.mockImplementation(async () => ({
session: createDefaultEmbeddedSession(),
}));
return await runEmbeddedAttempt({
sessionId: "embedded-session",

View File

@ -0,0 +1,20 @@
import type { StreamFn } from "@mariozechner/pi-agent-core";
/**
 * Wraps a stream function so every object payload is mutated by
 * `patchPayload` before being forwarded to the caller's `onPayload`.
 *
 * Non-object payloads pass through untouched; the caller's original
 * `onPayload` (if any) is always invoked and its return value preserved.
 */
export function streamWithPayloadPatch(
  underlying: StreamFn,
  model: Parameters<StreamFn>[0],
  context: Parameters<StreamFn>[1],
  options: Parameters<StreamFn>[2],
  patchPayload: (payload: Record<string, unknown>) => void,
) {
  const forwardPayload = options?.onPayload;
  const patchedOptions = {
    ...options,
    onPayload: (payload) => {
      // typeof null is "object", so guard against null explicitly.
      if (payload !== null && typeof payload === "object") {
        patchPayload(payload as Record<string, unknown>);
      }
      return forwardPayload?.(payload, model);
    },
  };
  return underlying(model, context, patchedOptions);
}

View File

@ -1,22 +1,13 @@
import { spawnSync } from "node:child_process";
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it } from "vitest";
import { withTempDir } from "../../test-helpers/temp-dir.js";
import {
buildPinnedWritePlan,
SANDBOX_PINNED_MUTATION_PYTHON,
} from "./fs-bridge-mutation-helper.js";
async function withTempRoot<T>(prefix: string, run: (root: string) => Promise<T>): Promise<T> {
const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
try {
return await run(root);
} finally {
await fs.rm(root, { recursive: true, force: true });
}
}
function runMutation(args: string[], input?: string) {
return spawnSync("python3", ["-c", SANDBOX_PINNED_MUTATION_PYTHON, ...args], {
input,
@ -56,7 +47,7 @@ function runWritePlan(args: string[], input?: string) {
describe("sandbox pinned mutation helper", () => {
it("writes through a pinned directory fd", async () => {
await withTempRoot("openclaw-mutation-helper-", async (root) => {
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
const workspace = path.join(root, "workspace");
await fs.mkdir(workspace, { recursive: true });
@ -72,7 +63,7 @@ describe("sandbox pinned mutation helper", () => {
it.runIf(process.platform !== "win32")(
"preserves stdin payload bytes when the pinned write plan runs through sh",
async () => {
await withTempRoot("openclaw-mutation-helper-", async (root) => {
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
const workspace = path.join(root, "workspace");
await fs.mkdir(workspace, { recursive: true });
@ -92,7 +83,7 @@ describe("sandbox pinned mutation helper", () => {
it.runIf(process.platform !== "win32")(
"rejects symlink-parent writes instead of materializing a temp file outside the mount",
async () => {
await withTempRoot("openclaw-mutation-helper-", async (root) => {
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
const workspace = path.join(root, "workspace");
const outside = path.join(root, "outside");
await fs.mkdir(workspace, { recursive: true });
@ -108,7 +99,7 @@ describe("sandbox pinned mutation helper", () => {
);
it.runIf(process.platform !== "win32")("rejects symlink segments during mkdirp", async () => {
await withTempRoot("openclaw-mutation-helper-", async (root) => {
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
const workspace = path.join(root, "workspace");
const outside = path.join(root, "outside");
await fs.mkdir(workspace, { recursive: true });
@ -123,7 +114,7 @@ describe("sandbox pinned mutation helper", () => {
});
it.runIf(process.platform !== "win32")("remove unlinks the symlink itself", async () => {
await withTempRoot("openclaw-mutation-helper-", async (root) => {
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
const workspace = path.join(root, "workspace");
const outside = path.join(root, "outside");
await fs.mkdir(workspace, { recursive: true });
@ -144,7 +135,7 @@ describe("sandbox pinned mutation helper", () => {
it.runIf(process.platform !== "win32")(
"rejects symlink destination parents during rename",
async () => {
await withTempRoot("openclaw-mutation-helper-", async (root) => {
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
const workspace = path.join(root, "workspace");
const outside = path.join(root, "outside");
await fs.mkdir(workspace, { recursive: true });
@ -175,7 +166,7 @@ describe("sandbox pinned mutation helper", () => {
it.runIf(process.platform !== "win32")(
"copies directories across different mount roots during rename fallback",
async () => {
await withTempRoot("openclaw-mutation-helper-", async (root) => {
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
const sourceRoot = path.join(root, "source");
const destRoot = path.join(root, "dest");
await fs.mkdir(path.join(sourceRoot, "dir", "nested"), { recursive: true });

View File

@ -0,0 +1,57 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import type { OpenClawConfig } from "../../config/config.js";
/** Layout of a throwaway on-disk workspace used by embedded pi-runner e2e tests. */
export type EmbeddedPiRunnerTestWorkspace = {
  tempRoot: string;
  agentDir: string;
  workspaceDir: string;
};

/**
 * Creates a fresh temp root under the OS temp dir (named from `prefix`)
 * containing empty `agent/` and `workspace/` subdirectories.
 *
 * @returns the absolute paths of the root and both subdirectories.
 */
export async function createEmbeddedPiRunnerTestWorkspace(
  prefix: string,
): Promise<EmbeddedPiRunnerTestWorkspace> {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
  const workspace: EmbeddedPiRunnerTestWorkspace = {
    tempRoot,
    agentDir: path.join(tempRoot, "agent"),
    workspaceDir: path.join(tempRoot, "workspace"),
  };
  await fs.mkdir(workspace.agentDir, { recursive: true });
  await fs.mkdir(workspace.workspaceDir, { recursive: true });
  return workspace;
}
/**
 * Deletes the temp root of a test workspace created by
 * `createEmbeddedPiRunnerTestWorkspace`. Safe to call with `undefined`
 * (e.g. when setup failed before the workspace existed) — then it is a no-op.
 */
export async function cleanupEmbeddedPiRunnerTestWorkspace(
  workspace: EmbeddedPiRunnerTestWorkspace | undefined,
): Promise<void> {
  if (workspace == null) {
    return;
  }
  await fs.rm(workspace.tempRoot, { recursive: true, force: true });
}
/**
 * Builds a minimal OpenClaw config with a single mock "openai" provider
 * exposing one zero-cost model per id in `modelIds`. The base URL and API
 * key are dummies — tests mock the transport, nothing is ever called.
 */
export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig {
  const models = modelIds.map((id) => ({
    id,
    name: `Mock ${id}`,
    reasoning: false,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 16_000,
    maxTokens: 2048,
  }));
  const openai = {
    api: "openai-responses",
    apiKey: "sk-test",
    baseUrl: "https://example.com",
    models,
  };
  return {
    models: {
      providers: { openai },
    },
  };
}
/**
 * Test stand-in for an enqueue function: runs the task right away with no
 * queueing or serialization and resolves with its result.
 */
export async function immediateEnqueue<T>(task: () => Promise<T>): Promise<T> {
  const result = await task();
  return result;
}

View File

@ -1,5 +1,5 @@
import { Type } from "@sinclair/typebox";
import { loadConfig } from "../../config/config.js";
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
import { callGateway } from "../../gateway/call.js";
import { capArrayByJsonBytes } from "../../gateway/session-utils.fs.js";
import { jsonUtf8Bytes } from "../../infra/json-utf8-bytes.js";
@ -169,6 +169,7 @@ function enforceSessionsHistoryHardCap(params: {
export function createSessionsHistoryTool(opts?: {
agentSessionKey?: string;
sandboxed?: boolean;
config?: OpenClawConfig;
}): AnyAgentTool {
return {
label: "Session History",
@ -180,7 +181,7 @@ export function createSessionsHistoryTool(opts?: {
const sessionKeyParam = readStringParam(params, "sessionKey", {
required: true,
});
const cfg = loadConfig();
const cfg = opts?.config ?? loadConfig();
const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } =
resolveSandboxedSessionToolContext({
cfg,

View File

@ -1,6 +1,6 @@
import path from "node:path";
import { Type } from "@sinclair/typebox";
import { loadConfig } from "../../config/config.js";
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
import {
resolveSessionFilePath,
resolveSessionFilePathOptions,
@ -33,6 +33,7 @@ const SessionsListToolSchema = Type.Object({
export function createSessionsListTool(opts?: {
agentSessionKey?: string;
sandboxed?: boolean;
config?: OpenClawConfig;
}): AnyAgentTool {
return {
label: "Sessions",
@ -41,7 +42,7 @@ export function createSessionsListTool(opts?: {
parameters: SessionsListToolSchema,
execute: async (_toolCallId, args) => {
const params = args as Record<string, unknown>;
const cfg = loadConfig();
const cfg = opts?.config ?? loadConfig();
const { mainKey, alias, requesterInternalKey, restrictToSpawned } =
resolveSandboxedSessionToolContext({
cfg,

View File

@ -1,6 +1,6 @@
import crypto from "node:crypto";
import { Type } from "@sinclair/typebox";
import { loadConfig } from "../../config/config.js";
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
import { callGateway } from "../../gateway/call.js";
import { normalizeAgentId, resolveAgentIdFromSessionKey } from "../../routing/session-key.js";
import { SESSION_LABEL_MAX_LENGTH } from "../../sessions/session-label.js";
@ -36,6 +36,7 @@ export function createSessionsSendTool(opts?: {
agentSessionKey?: string;
agentChannel?: GatewayMessageChannel;
sandboxed?: boolean;
config?: OpenClawConfig;
}): AnyAgentTool {
return {
label: "Session Send",
@ -46,7 +47,7 @@ export function createSessionsSendTool(opts?: {
execute: async (_toolCallId, args) => {
const params = args as Record<string, unknown>;
const message = readStringParam(params, "message", { required: true });
const cfg = loadConfig();
const cfg = opts?.config ?? loadConfig();
const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } =
resolveSandboxedSessionToolContext({
cfg,

View File

@ -12,11 +12,13 @@ import {
resolveResponsePrefixTemplate,
type ResponsePrefixContext,
} from "./response-prefix-template.js";
import { hasSlackDirectives, parseSlackDirectives } from "./slack-directives.js";
export type NormalizeReplySkipReason = "empty" | "silent" | "heartbeat";
export type NormalizeReplyOptions = {
responsePrefix?: string;
enableSlackInteractiveReplies?: boolean;
/** Context for template variable interpolation in responsePrefix */
responsePrefixContext?: ResponsePrefixContext;
onHeartbeatStrip?: () => void;
@ -105,5 +107,10 @@ export function normalizeReplyPayload(
text = `${effectivePrefix} ${text}`;
}
return { ...enrichedPayload, text };
enrichedPayload = { ...enrichedPayload, text };
if (opts.enableSlackInteractiveReplies && text && hasSlackDirectives(text)) {
enrichedPayload = parseSlackDirectives(enrichedPayload);
}
return enrichedPayload;
}

View File

@ -43,6 +43,7 @@ function getHumanDelay(config: HumanDelayConfig | undefined): number {
export type ReplyDispatcherOptions = {
deliver: ReplyDispatchDeliverer;
responsePrefix?: string;
enableSlackInteractiveReplies?: boolean;
/** Static context for response prefix template interpolation. */
responsePrefixContext?: ResponsePrefixContext;
/** Dynamic context provider for response prefix template interpolation.
@ -84,7 +85,11 @@ export type ReplyDispatcher = {
type NormalizeReplyPayloadInternalOptions = Pick<
ReplyDispatcherOptions,
"responsePrefix" | "responsePrefixContext" | "responsePrefixContextProvider" | "onHeartbeatStrip"
| "responsePrefix"
| "enableSlackInteractiveReplies"
| "responsePrefixContext"
| "responsePrefixContextProvider"
| "onHeartbeatStrip"
> & {
onSkip?: (reason: NormalizeReplySkipReason) => void;
};
@ -98,6 +103,7 @@ function normalizeReplyPayloadInternal(
return normalizeReplyPayload(payload, {
responsePrefix: opts.responsePrefix,
enableSlackInteractiveReplies: opts.enableSlackInteractiveReplies,
responsePrefixContext: prefixContext,
onHeartbeatStrip: opts.onHeartbeatStrip,
onSkip: opts.onSkip,
@ -129,6 +135,7 @@ export function createReplyDispatcher(options: ReplyDispatcherOptions): ReplyDis
const enqueue = (kind: ReplyDispatchKind, payload: ReplyPayload) => {
const normalized = normalizeReplyPayloadInternal(payload, {
responsePrefix: options.responsePrefix,
enableSlackInteractiveReplies: options.enableSlackInteractiveReplies,
responsePrefixContext: options.responsePrefixContext,
responsePrefixContextProvider: options.responsePrefixContextProvider,
onHeartbeatStrip: options.onHeartbeatStrip,

View File

@ -16,6 +16,7 @@ import {
} from "./queue.js";
import { createReplyDispatcher } from "./reply-dispatcher.js";
import { createReplyToModeFilter, resolveReplyToMode } from "./reply-threading.js";
import { parseSlackDirectives, hasSlackDirectives } from "./slack-directives.js";
describe("normalizeInboundTextNewlines", () => {
it("normalizes real newlines and preserves literal backslash-n sequences", () => {
@ -196,6 +197,8 @@ describe("inbound context contract (providers + extensions)", () => {
const getLineData = (result: ReturnType<typeof parseLineDirectives>) =>
(result.channelData?.line as Record<string, unknown> | undefined) ?? {};
const getSlackData = (result: ReturnType<typeof parseSlackDirectives>) =>
(result.channelData?.slack as Record<string, unknown> | undefined) ?? {};
describe("hasLineDirectives", () => {
it("matches expected detection across directive patterns", () => {
@ -219,6 +222,24 @@ describe("hasLineDirectives", () => {
});
});
describe("hasSlackDirectives", () => {
it("matches expected detection across Slack directive patterns", () => {
const cases: Array<{ text: string; expected: boolean }> = [
{ text: "Pick one [[slack_buttons: Approve:approve, Reject:reject]]", expected: true },
{
text: "[[slack_select: Choose a project | Alpha:alpha, Beta:beta]]",
expected: true,
},
{ text: "Just regular text", expected: false },
{ text: "[[buttons: Menu | Choose | A:a]]", expected: false },
];
for (const testCase of cases) {
expect(hasSlackDirectives(testCase.text)).toBe(testCase.expected);
}
});
});
describe("parseLineDirectives", () => {
describe("quick_replies", () => {
it("parses quick replies variants", () => {
@ -579,6 +600,279 @@ describe("parseLineDirectives", () => {
});
});
describe("parseSlackDirectives", () => {
it("builds section and button blocks from slack_buttons directives", () => {
const result = parseSlackDirectives({
text: "Choose an action [[slack_buttons: Approve:approve, Reject:reject]]",
});
expect(result.text).toBe("Choose an action");
expect(getSlackData(result).blocks).toEqual([
{
type: "section",
text: {
type: "mrkdwn",
text: "Choose an action",
},
},
{
type: "actions",
block_id: "openclaw_reply_buttons_1",
elements: [
{
type: "button",
action_id: "openclaw:reply_button",
text: {
type: "plain_text",
text: "Approve",
emoji: true,
},
value: "reply_1_approve",
},
{
type: "button",
action_id: "openclaw:reply_button",
text: {
type: "plain_text",
text: "Reject",
emoji: true,
},
value: "reply_2_reject",
},
],
},
]);
});
it("builds static select blocks from slack_select directives", () => {
const result = parseSlackDirectives({
text: "[[slack_select: Choose a project | Alpha:alpha, Beta:beta]]",
});
expect(result.text).toBeUndefined();
expect(getSlackData(result).blocks).toEqual([
{
type: "actions",
block_id: "openclaw_reply_select_1",
elements: [
{
type: "static_select",
action_id: "openclaw:reply_select",
placeholder: {
type: "plain_text",
text: "Choose a project",
emoji: true,
},
options: [
{
text: {
type: "plain_text",
text: "Alpha",
emoji: true,
},
value: "reply_1_alpha",
},
{
text: {
type: "plain_text",
text: "Beta",
emoji: true,
},
value: "reply_2_beta",
},
],
},
],
},
]);
});
it("appends Slack interactive blocks to existing slack blocks", () => {
const result = parseSlackDirectives({
text: "Act now [[slack_buttons: Retry:retry]]",
channelData: {
slack: {
blocks: [{ type: "divider" }],
},
},
});
expect(result.text).toBe("Act now");
expect(getSlackData(result).blocks).toEqual([
{ type: "divider" },
{
type: "section",
text: {
type: "mrkdwn",
text: "Act now",
},
},
{
type: "actions",
block_id: "openclaw_reply_buttons_1",
elements: [
{
type: "button",
action_id: "openclaw:reply_button",
text: {
type: "plain_text",
text: "Retry",
emoji: true,
},
value: "reply_1_retry",
},
],
},
]);
});
it("preserves authored order for mixed Slack directives", () => {
const result = parseSlackDirectives({
text: "[[slack_select: Pick one | Alpha:alpha]] then [[slack_buttons: Retry:retry]]",
});
expect(getSlackData(result).blocks).toEqual([
{
type: "actions",
block_id: "openclaw_reply_select_1",
elements: [
{
type: "static_select",
action_id: "openclaw:reply_select",
placeholder: {
type: "plain_text",
text: "Pick one",
emoji: true,
},
options: [
{
text: {
type: "plain_text",
text: "Alpha",
emoji: true,
},
value: "reply_1_alpha",
},
],
},
],
},
{
type: "section",
text: {
type: "mrkdwn",
text: "then",
},
},
{
type: "actions",
block_id: "openclaw_reply_buttons_1",
elements: [
{
type: "button",
action_id: "openclaw:reply_button",
text: {
type: "plain_text",
text: "Retry",
emoji: true,
},
value: "reply_1_retry",
},
],
},
]);
});
it("truncates Slack interactive reply strings to safe Block Kit limits", () => {
const long = "x".repeat(120);
const result = parseSlackDirectives({
text: `${"y".repeat(3100)} [[slack_select: ${long} | ${long}:${long}]] [[slack_buttons: ${long}:${long}]]`,
});
const blocks = getSlackData(result).blocks as Array<Record<string, unknown>>;
expect(blocks).toHaveLength(3);
expect(((blocks[0]?.text as { text?: string })?.text ?? "").length).toBeLessThanOrEqual(3000);
expect(
(
(
(blocks[1]?.elements as Array<Record<string, unknown>>)?.[0]?.placeholder as {
text?: string;
}
)?.text ?? ""
).length,
).toBeLessThanOrEqual(75);
expect(
(
(
(
(blocks[1]?.elements as Array<Record<string, unknown>>)?.[0]?.options as Array<
Record<string, unknown>
>
)?.[0]?.text as { text?: string }
)?.text ?? ""
).length,
).toBeLessThanOrEqual(75);
expect(
(
((
(blocks[1]?.elements as Array<Record<string, unknown>>)?.[0]?.options as Array<
Record<string, unknown>
>
)?.[0]?.value as string | undefined) ?? ""
).length,
).toBeLessThanOrEqual(75);
expect(
(
(
(blocks[2]?.elements as Array<Record<string, unknown>>)?.[0]?.text as {
text?: string;
}
)?.text ?? ""
).length,
).toBeLessThanOrEqual(75);
expect(
(
((blocks[2]?.elements as Array<Record<string, unknown>>)?.[0]?.value as
| string
| undefined) ?? ""
).length,
).toBeLessThanOrEqual(75);
});
it("falls back to the original payload when generated blocks would exceed Slack limits", () => {
const result = parseSlackDirectives({
text: "Choose [[slack_buttons: Retry:retry]]",
channelData: {
slack: {
blocks: Array.from({ length: 49 }, () => ({ type: "divider" })),
},
},
});
expect(result).toEqual({
text: "Choose [[slack_buttons: Retry:retry]]",
channelData: {
slack: {
blocks: Array.from({ length: 49 }, () => ({ type: "divider" })),
},
},
});
});
it("ignores malformed existing Slack blocks during directive compilation", () => {
expect(() =>
parseSlackDirectives({
text: "Choose [[slack_buttons: Retry:retry]]",
channelData: {
slack: {
blocks: "{not json}",
},
},
}),
).not.toThrow();
});
});
function createDeferred<T>() {
let resolve!: (value: T) => void;
let reject!: (reason?: unknown) => void;
@ -1485,6 +1779,43 @@ describe("createReplyDispatcher", () => {
expect(onHeartbeatStrip).toHaveBeenCalledTimes(2);
});
it("compiles Slack directives in dispatcher flows when enabled", async () => {
const deliver = vi.fn().mockResolvedValue(undefined);
const dispatcher = createReplyDispatcher({
deliver,
enableSlackInteractiveReplies: true,
});
expect(
dispatcher.sendFinalReply({
text: "Choose [[slack_buttons: Retry:retry]]",
}),
).toBe(true);
await dispatcher.waitForIdle();
expect(deliver).toHaveBeenCalledTimes(1);
expect(deliver.mock.calls[0]?.[0]).toMatchObject({
text: "Choose",
channelData: {
slack: {
blocks: [
{
type: "section",
text: {
type: "mrkdwn",
text: "Choose",
},
},
{
type: "actions",
block_id: "openclaw_reply_buttons_1",
},
],
},
},
});
});
it("avoids double-prefixing and keeps media when heartbeat is the only text", async () => {
const deliver = vi.fn().mockResolvedValue(undefined);
const dispatcher = createReplyDispatcher({

View File

@ -150,6 +150,67 @@ describe("normalizeReplyPayload", () => {
expect(result!.text).toBe("");
expect(result!.mediaUrl).toBe("https://example.com/img.png");
});
it("does not compile Slack directives unless interactive replies are enabled", () => {
const result = normalizeReplyPayload({
text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]",
});
expect(result).not.toBeNull();
expect(result!.text).toBe("hello [[slack_buttons: Retry:retry, Ignore:ignore]]");
expect(result!.channelData).toBeUndefined();
});
it("applies responsePrefix before compiling Slack directives into blocks", () => {
const result = normalizeReplyPayload(
{
text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]",
},
{ responsePrefix: "[bot]", enableSlackInteractiveReplies: true },
);
expect(result).not.toBeNull();
expect(result!.text).toBe("[bot] hello");
expect(result!.channelData).toEqual({
slack: {
blocks: [
{
type: "section",
text: {
type: "mrkdwn",
text: "[bot] hello",
},
},
{
type: "actions",
block_id: "openclaw_reply_buttons_1",
elements: [
{
type: "button",
action_id: "openclaw:reply_button",
text: {
type: "plain_text",
text: "Retry",
emoji: true,
},
value: "reply_1_retry",
},
{
type: "button",
action_id: "openclaw:reply_button",
text: {
type: "plain_text",
text: "Ignore",
emoji: true,
},
value: "reply_2_ignore",
},
],
},
],
},
});
});
});
describe("typing controller", () => {

View File

@ -201,6 +201,55 @@ describe("routeReply", () => {
);
});
it("routes directive-only Slack replies when interactive replies are enabled", async () => {
mocks.sendMessageSlack.mockClear();
const cfg = {
channels: {
slack: {
capabilities: { interactiveReplies: true },
},
},
} as unknown as OpenClawConfig;
await routeReply({
payload: { text: "[[slack_select: Choose one | Alpha:alpha]]" },
channel: "slack",
to: "channel:C123",
cfg,
});
expect(mocks.sendMessageSlack).toHaveBeenCalledWith(
"channel:C123",
"",
expect.objectContaining({
blocks: [
expect.objectContaining({
type: "actions",
block_id: "openclaw_reply_select_1",
}),
],
}),
);
});
it("does not bypass the empty-reply guard for invalid Slack blocks", async () => {
mocks.sendMessageSlack.mockClear();
const res = await routeReply({
payload: {
text: " ",
channelData: {
slack: {
blocks: " ",
},
},
},
channel: "slack",
to: "channel:C123",
cfg: {} as never,
});
expect(res.ok).toBe(true);
expect(mocks.sendMessageSlack).not.toHaveBeenCalled();
});
it("does not derive responsePrefix from agent identity when routing", async () => {
mocks.sendMessageSlack.mockClear();
const cfg = {

View File

@ -12,6 +12,8 @@ import { resolveEffectiveMessagesConfig } from "../../agents/identity.js";
import { normalizeChannelId } from "../../channels/plugins/index.js";
import type { OpenClawConfig } from "../../config/config.js";
import { buildOutboundSessionContext } from "../../infra/outbound/session-context.js";
import { parseSlackBlocksInput } from "../../slack/blocks-input.js";
import { isSlackInteractiveRepliesEnabled } from "../../slack/interactive-replies.js";
import { INTERNAL_MESSAGE_CHANNEL, normalizeMessageChannel } from "../../utils/message-channel.js";
import type { OriginatingChannelType } from "../templating.js";
import type { ReplyPayload } from "../types.js";
@ -94,6 +96,8 @@ export async function routeReply(params: RouteReplyParams): Promise<RouteReplyRe
: cfg.messages?.responsePrefix;
const normalized = normalizeReplyPayload(payload, {
responsePrefix,
enableSlackInteractiveReplies:
channel === "slack" ? isSlackInteractiveRepliesEnabled({ cfg, accountId }) : false,
});
if (!normalized) {
return { ok: true };
@ -106,9 +110,25 @@ export async function routeReply(params: RouteReplyParams): Promise<RouteReplyRe
? [normalized.mediaUrl]
: [];
const replyToId = normalized.replyToId;
let hasSlackBlocks = false;
if (
channel === "slack" &&
normalized.channelData?.slack &&
typeof normalized.channelData.slack === "object" &&
!Array.isArray(normalized.channelData.slack)
) {
try {
hasSlackBlocks = Boolean(
parseSlackBlocksInput((normalized.channelData.slack as { blocks?: unknown }).blocks)
?.length,
);
} catch {
hasSlackBlocks = false;
}
}
// Skip empty replies.
if (!text.trim() && mediaUrls.length === 0) {
if (!text.trim() && mediaUrls.length === 0 && !hasSlackBlocks) {
return { ok: true };
}

View File

@ -0,0 +1,228 @@
import { parseSlackBlocksInput } from "../../slack/blocks-input.js";
import type { ReplyPayload } from "../types.js";
// Action ids routed back to OpenClaw's Slack interaction handler.
const SLACK_REPLY_BUTTON_ACTION_ID = "openclaw:reply_button";
const SLACK_REPLY_SELECT_ACTION_ID = "openclaw:reply_select";
// Caps applied while compiling directives. The 50/75/3000 values presumably
// mirror Slack Block Kit limits — confirm against Slack's Block Kit reference.
const SLACK_MAX_BLOCKS = 50;
const SLACK_BUTTON_MAX_ITEMS = 5;
const SLACK_SELECT_MAX_ITEMS = 100;
const SLACK_SECTION_TEXT_MAX = 3000;
const SLACK_PLAIN_TEXT_MAX = 75;
const SLACK_OPTION_VALUE_MAX = 75;
// Matches [[slack_buttons: ...]] / [[slack_select: ...]]. Global + case-
// insensitive, so callers must reset lastIndex before reusing it.
const SLACK_DIRECTIVE_RE = /\[\[(slack_buttons|slack_select):\s*([^\]]+)\]\]/gi;
type SlackBlock = Record<string, unknown>;
// Shape of payload.channelData.slack that this module reads.
type SlackChannelData = {
  blocks?: unknown;
};
// One parsed "Label:value" entry from a directive body.
type SlackChoice = {
  label: string;
  value: string;
};
/**
 * Trim `value` and cap it at `max` characters.
 *
 * When truncation is needed and there is room, the last kept character is
 * replaced with a single ellipsis so readers can tell the text was cut.
 * The result length never exceeds `max`.
 *
 * Fix: the original else-branch sliced to `max - 1` (reserving a slot for
 * a marker) but appended nothing, silently wasting a character and making
 * the `max <= 1` special case dead weight — the "…" suffix is restored.
 */
function truncateSlackText(value: string, max: number): string {
  const trimmed = value.trim();
  if (trimmed.length <= max) {
    return trimmed;
  }
  if (max <= 1) {
    // No room for an ellipsis marker; hard-cut instead.
    return trimmed.slice(0, max);
  }
  return `${trimmed.slice(0, max - 1)}…`;
}
/**
 * Parse a single "Label:value" entry. A bare token doubles as both label
 * and value; an entry with an empty label or value is rejected.
 */
function parseChoice(raw: string): SlackChoice | null {
  const entry = raw.trim();
  if (!entry) {
    return null;
  }
  const sep = entry.indexOf(":");
  if (sep < 0) {
    return { label: entry, value: entry };
  }
  const label = entry.slice(0, sep).trim();
  const value = entry.slice(sep + 1).trim();
  return label && value ? { label, value } : null;
}

/**
 * Split a comma-separated choice list, drop entries that fail to parse,
 * and cap the result at maxItems.
 */
function parseChoices(raw: string, maxItems: number): SlackChoice[] {
  const valid: SlackChoice[] = [];
  for (const piece of raw.split(",")) {
    const choice = parseChoice(piece);
    if (choice) {
      valid.push(choice);
    }
  }
  return valid.slice(0, maxItems);
}
/**
 * Derive a Slack-safe action value ("reply_<n>_<slug>") from a choice value:
 * lowercased, runs of non-alphanumerics collapsed to "_", edge underscores
 * stripped, "choice" when nothing survives, capped at the option-value limit.
 */
function buildSlackReplyChoiceToken(value: string, index: number): string {
  let slug = value.trim().toLowerCase().replace(/[^a-z0-9]+/g, "_");
  slug = slug.replace(/^_+|_+$/g, "");
  if (!slug) {
    slug = "choice";
  }
  return truncateSlackText(`reply_${index}_${slug}`, SLACK_OPTION_VALUE_MAX);
}
/** Wrap non-blank text in a Slack mrkdwn section block; null for blank text. */
function buildSectionBlock(text: string): SlackBlock | null {
  const content = text.trim();
  if (!content) {
    return null;
  }
  const capped = truncateSlackText(content, SLACK_SECTION_TEXT_MAX);
  return {
    type: "section",
    text: { type: "mrkdwn", text: capped },
  };
}
/**
 * Compile a slack_buttons directive body ("Label:value, ...") into an
 * actions block of buttons; null when no valid choices parse.
 */
function buildButtonsBlock(raw: string, index: number): SlackBlock | null {
  const choices = parseChoices(raw, SLACK_BUTTON_MAX_ITEMS);
  if (!choices.length) {
    return null;
  }
  const elements = choices.map((choice, i) => ({
    type: "button",
    action_id: SLACK_REPLY_BUTTON_ACTION_ID,
    text: {
      type: "plain_text",
      text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX),
      emoji: true,
    },
    value: buildSlackReplyChoiceToken(choice.value, i + 1),
  }));
  return {
    type: "actions",
    block_id: `openclaw_reply_buttons_${index}`,
    elements,
  };
}
/**
 * Compile a slack_select directive body ("Placeholder | Label:value, ...")
 * into an actions block holding one static_select. The placeholder segment
 * is optional ("Choose an option" when omitted). Returns null when no valid
 * choices parse.
 */
function buildSelectBlock(raw: string, index: number): SlackBlock | null {
  const parts = raw
    .split("|")
    .map((entry) => entry.trim())
    .filter(Boolean);
  if (parts.length === 0) {
    return null;
  }
  const hasPlaceholder = parts.length >= 2;
  const placeholder = hasPlaceholder ? parts[0] : "Choose an option";
  const choices = parseChoices(hasPlaceholder ? parts[1] : parts[0], SLACK_SELECT_MAX_ITEMS);
  if (choices.length === 0) {
    return null;
  }
  const options = choices.map((choice, i) => ({
    text: {
      type: "plain_text",
      text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX),
      emoji: true,
    },
    value: buildSlackReplyChoiceToken(choice.value, i + 1),
  }));
  return {
    type: "actions",
    block_id: `openclaw_reply_select_${index}`,
    elements: [
      {
        type: "static_select",
        action_id: SLACK_REPLY_SELECT_ACTION_ID,
        placeholder: {
          type: "plain_text",
          text: truncateSlackText(placeholder, SLACK_PLAIN_TEXT_MAX),
          emoji: true,
        },
        options,
      },
    ],
  };
}
/**
 * Pull already-present Slack blocks out of the payload's channelData.
 * Tolerates malformed input: parse failures yield an empty list instead
 * of throwing.
 */
function readExistingSlackBlocks(payload: ReplyPayload): SlackBlock[] {
  const slackData = payload.channelData?.slack as SlackChannelData | undefined;
  let blocks: SlackBlock[] | undefined;
  try {
    blocks = parseSlackBlocksInput(slackData?.blocks) as SlackBlock[] | undefined;
  } catch {
    blocks = undefined;
  }
  return blocks ?? [];
}
/**
 * Quick probe for "[[slack_buttons: ...]]" / "[[slack_select: ...]]"
 * directives. The shared regex is global, so lastIndex is reset before the
 * test to keep repeated calls deterministic.
 */
export function hasSlackDirectives(text: string): boolean {
  SLACK_DIRECTIVE_RE.lastIndex = 0;
  const found = SLACK_DIRECTIVE_RE.test(text);
  SLACK_DIRECTIVE_RE.lastIndex = 0;
  return found;
}
/**
 * Compile Slack directive markup inside payload.text into Block Kit blocks.
 *
 * Each "[[slack_buttons: ...]]" / "[[slack_select: ...]]" occurrence is
 * replaced, in authored order, by a section block for the text preceding it
 * (when non-blank) followed by the interactive block it describes; the
 * directive markup is stripped from the visible text.
 *
 * The payload is returned unchanged when: there is no text, no directive
 * matches, no directive yields a valid interactive block, or existing +
 * generated blocks would exceed SLACK_MAX_BLOCKS.
 */
export function parseSlackDirectives(payload: ReplyPayload): ReplyPayload {
  const text = payload.text;
  if (!text) {
    return payload;
  }
  const generatedBlocks: SlackBlock[] = [];
  const visibleTextParts: string[] = [];
  // Buttons and selects are numbered independently (…_buttons_1, …_select_1).
  let buttonIndex = 0;
  let selectIndex = 0;
  // Position just past the previously consumed directive in `text`.
  let cursor = 0;
  let matchedDirective = false;
  let generatedInteractiveBlock = false;
  // Shared global regex: reset before iterating.
  SLACK_DIRECTIVE_RE.lastIndex = 0;
  for (const match of text.matchAll(SLACK_DIRECTIVE_RE)) {
    matchedDirective = true;
    const matchText = match[0];
    const directiveType = match[1];
    const body = match[2];
    const index = match.index ?? 0;
    // Text between the previous directive and this one stays visible and
    // becomes its own section block.
    const precedingText = text.slice(cursor, index);
    visibleTextParts.push(precedingText);
    const section = buildSectionBlock(precedingText);
    if (section) {
      generatedBlocks.push(section);
    }
    const block =
      directiveType.toLowerCase() === "slack_buttons"
        ? buildButtonsBlock(body, ++buttonIndex)
        : buildSelectBlock(body, ++selectIndex);
    if (block) {
      generatedInteractiveBlock = true;
      generatedBlocks.push(block);
    }
    cursor = index + matchText.length;
  }
  // Trailing text after the last directive is also kept and sectioned.
  const trailingText = text.slice(cursor);
  visibleTextParts.push(trailingText);
  const trailingSection = buildSectionBlock(trailingText);
  if (trailingSection) {
    generatedBlocks.push(trailingSection);
  }
  const cleanedText = visibleTextParts.join("");
  // Nothing interactive compiled: hand the payload back untouched (the
  // directive markup is deliberately left in the text in that case).
  if (!matchedDirective || !generatedInteractiveBlock) {
    return payload;
  }
  const existingBlocks = readExistingSlackBlocks(payload);
  // Over the Block Kit message budget: fall back to the original payload
  // rather than produce a message Slack would reject.
  if (existingBlocks.length + generatedBlocks.length > SLACK_MAX_BLOCKS) {
    return payload;
  }
  const nextBlocks = [...existingBlocks, ...generatedBlocks];
  return {
    ...payload,
    // Blank cleaned text collapses to undefined so directive-only replies
    // carry no text field.
    text: cleanedText.trim() || undefined,
    channelData: {
      ...payload.channelData,
      slack: {
        ...(payload.channelData?.slack as Record<string, unknown> | undefined),
        blocks: nextBlocks,
      },
    },
  };
}

View File

@ -0,0 +1,68 @@
import { describe, expect, it } from "vitest";
import {
buildAiSnapshotFromChromeMcpSnapshot,
flattenChromeMcpSnapshotToAriaNodes,
} from "./chrome-mcp.snapshot.js";
// Minimal Chrome MCP accessibility-tree fixture: a document root with one
// button and one textbox child (the textbox carries a value).
const snapshot = {
  id: "root",
  role: "document",
  name: "Example",
  children: [
    {
      id: "btn-1",
      role: "button",
      name: "Continue",
    },
    {
      id: "txt-1",
      role: "textbox",
      name: "Email",
      value: "peter@example.com",
    },
  ],
};
describe("chrome MCP snapshot conversion", () => {
  it("flattens structured snapshots into aria-style nodes", () => {
    const nodes = flattenChromeMcpSnapshotToAriaNodes(snapshot, 10);
    // Preorder flattening: depth reflects tree level, absent fields stay undefined.
    expect(nodes).toEqual([
      {
        ref: "root",
        role: "document",
        name: "Example",
        value: undefined,
        description: undefined,
        depth: 0,
      },
      {
        ref: "btn-1",
        role: "button",
        name: "Continue",
        value: undefined,
        description: undefined,
        depth: 1,
      },
      {
        ref: "txt-1",
        role: "textbox",
        name: "Email",
        value: "peter@example.com",
        description: undefined,
        depth: 1,
      },
    ]);
  });
  it("builds AI snapshots that preserve Chrome MCP uids as refs", () => {
    const result = buildAiSnapshotFromChromeMcpSnapshot({ root: snapshot });
    // The node's Chrome MCP uid appears verbatim as [ref=...] in the text output.
    expect(result.snapshot).toContain('- button "Continue" [ref=btn-1]');
    expect(result.snapshot).toContain('- textbox "Email" [ref=txt-1] value="peter@example.com"');
    expect(result.refs).toEqual({
      "btn-1": { role: "button", name: "Continue" },
      "txt-1": { role: "textbox", name: "Email" },
    });
    expect(result.stats.refs).toBe(2);
  });
});

View File

@ -0,0 +1,246 @@
import type { SnapshotAriaNode } from "./client.js";
import {
getRoleSnapshotStats,
type RoleRefMap,
type RoleSnapshotOptions,
} from "./pw-role-snapshot.js";
// Minimal shape of one node in a Chrome MCP accessibility snapshot tree.
export type ChromeMcpSnapshotNode = {
  id?: string;
  role?: string;
  name?: string;
  value?: string | number | boolean;
  description?: string;
  children?: ChromeMcpSnapshotNode[];
};
// Roles a user can operate; they always receive refs and survive the
// `interactive` filter option.
const INTERACTIVE_ROLES = new Set([
  "button",
  "checkbox",
  "combobox",
  "link",
  "listbox",
  "menuitem",
  "menuitemcheckbox",
  "menuitemradio",
  "option",
  "radio",
  "searchbox",
  "slider",
  "spinbutton",
  "switch",
  "tab",
  "textbox",
  "treeitem",
]);
// Content/landmark roles; they receive refs only when they carry a name.
const CONTENT_ROLES = new Set([
  "article",
  "cell",
  "columnheader",
  "gridcell",
  "heading",
  "listitem",
  "main",
  "navigation",
  "region",
  "rowheader",
]);
// Purely structural roles; dropped by the `compact` option when unnamed.
const STRUCTURAL_ROLES = new Set([
  "application",
  "directory",
  "document",
  "generic",
  "group",
  "ignored",
  "list",
  "menu",
  "menubar",
  "none",
  "presentation",
  "row",
  "rowgroup",
  "tablist",
  "table",
  "toolbar",
  "tree",
  "treegrid",
]);
/** Trimmed, lowercased role; "generic" when the field is absent or blank. */
function normalizeRole(node: ChromeMcpSnapshotNode): string {
  if (typeof node.role !== "string") {
    return "generic";
  }
  const role = node.role.trim().toLowerCase();
  return role === "" ? "generic" : role;
}
/**
 * Coerce a snapshot field to a trimmed string: strings are trimmed (blank
 * becomes undefined), numbers/booleans are stringified, anything else is
 * dropped.
 */
function normalizeString(value: unknown): string | undefined {
  switch (typeof value) {
    case "string": {
      const trimmed = value.trim();
      return trimmed === "" ? undefined : trimmed;
    }
    case "number":
    case "boolean":
      return String(value);
    default:
      return undefined;
  }
}
/** Escape backslashes (first) then double quotes for a quoted snapshot field. */
function escapeQuoted(value: string): string {
  return value.replace(/\\/g, "\\\\").replace(/"/g, '\\"');
}
/**
 * Emission filter: the `interactive` option keeps only interactive roles,
 * and the `compact` option drops unnamed structural roles.
 */
function shouldIncludeNode(params: {
  role: string;
  name?: string;
  options?: RoleSnapshotOptions;
}): boolean {
  const { role, name, options } = params;
  if (options?.interactive && !INTERACTIVE_ROLES.has(role)) {
    return false;
  }
  const droppable = options?.compact && STRUCTURAL_ROLES.has(role) && !name;
  return !droppable;
}
/** Refs are minted for interactive roles always, content roles only when named. */
function shouldCreateRef(role: string, name?: string): boolean {
  if (INTERACTIVE_ROLES.has(role)) {
    return true;
  }
  return CONTENT_ROLES.has(role) && Boolean(name);
}
// Tracks how many refs share a "role:name" key so duplicate targets can be
// disambiguated with an occurrence index.
type DuplicateTracker = {
  counts: Map<string, number>;
  keysByRef: Map<string, string>;
  duplicates: Set<string>;
};

/** Fresh, empty tracker. */
function createDuplicateTracker(): DuplicateTracker {
  return { counts: new Map(), keysByRef: new Map(), duplicates: new Set() };
}

/**
 * Record a ref under its "role:name" key. Returns undefined for the first
 * occurrence; for repeats, returns how many earlier refs share the key
 * (1 for the second, 2 for the third, ...) and marks the key duplicated.
 */
function registerRef(
  tracker: DuplicateTracker,
  ref: string,
  role: string,
  name?: string,
): number | undefined {
  const key = `${role}:${name ?? ""}`;
  const seen = tracker.counts.get(key) ?? 0;
  tracker.counts.set(key, seen + 1);
  tracker.keysByRef.set(ref, key);
  if (seen === 0) {
    return undefined;
  }
  tracker.duplicates.add(key);
  return seen;
}
/**
 * Preorder-flatten a Chrome MCP snapshot tree into aria-style nodes.
 * Only nodes with a usable `id` are emitted (their children are still
 * walked), `depth` counts tree levels from the root, and output is capped
 * at `limit` (clamped to the range [1, 2000]).
 */
export function flattenChromeMcpSnapshotToAriaNodes(
  root: ChromeMcpSnapshotNode,
  limit = 500,
): SnapshotAriaNode[] {
  const cap = Math.max(1, Math.min(2000, Math.floor(limit)));
  const collected: SnapshotAriaNode[] = [];
  const walk = (node: ChromeMcpSnapshotNode, depth: number): void => {
    if (collected.length >= cap) {
      return;
    }
    const ref = normalizeString(node.id);
    if (ref !== undefined) {
      collected.push({
        ref,
        role: normalizeRole(node),
        name: normalizeString(node.name) ?? "",
        value: normalizeString(node.value),
        description: normalizeString(node.description),
        depth,
      });
    }
    for (const child of node.children ?? []) {
      walk(child, depth + 1);
      if (collected.length >= cap) {
        return;
      }
    }
  };
  walk(root, 0);
  return collected;
}
/**
 * Render a Chrome MCP snapshot tree as an indented, aria-style text
 * snapshot plus a ref map keyed by each node's Chrome MCP uid.
 *
 * Nodes are filtered by shouldIncludeNode (interactive/compact options) and
 * cut off below options.maxDepth. A ref's `nth` occurrence index is only
 * kept when several refs share the same role+name; unique ones have it
 * removed in a post-pass. Output longer than maxChars is truncated with an
 * explicit marker, and stats are computed on the final (possibly truncated)
 * text.
 */
export function buildAiSnapshotFromChromeMcpSnapshot(params: {
  root: ChromeMcpSnapshotNode;
  options?: RoleSnapshotOptions;
  maxChars?: number;
}): {
  snapshot: string;
  truncated?: boolean;
  refs: RoleRefMap;
  stats: { lines: number; chars: number; refs: number; interactive: number };
} {
  const refs: RoleRefMap = {};
  const tracker = createDuplicateTracker();
  const lines: string[] = [];
  const visit = (node: ChromeMcpSnapshotNode, depth: number) => {
    const role = normalizeRole(node);
    const name = normalizeString(node.name);
    const value = normalizeString(node.value);
    const description = normalizeString(node.description);
    const maxDepth = params.options?.maxDepth;
    // Depth cutoff prunes the whole subtree, children included.
    if (maxDepth !== undefined && depth > maxDepth) {
      return;
    }
    const includeNode = shouldIncludeNode({ role, name, options: params.options });
    if (includeNode) {
      let line = `${" ".repeat(depth)}- ${role}`;
      if (name) {
        line += ` "${escapeQuoted(name)}"`;
      }
      const ref = normalizeString(node.id);
      if (ref && shouldCreateRef(role, name)) {
        // nth is recorded eagerly; the post-pass below strips it again for
        // keys that ended up unique.
        const nth = registerRef(tracker, ref, role, name);
        refs[ref] = nth === undefined ? { role, name } : { role, name, nth };
        line += ` [ref=${ref}]`;
      }
      if (value) {
        line += ` value="${escapeQuoted(value)}"`;
      }
      if (description) {
        line += ` description="${escapeQuoted(description)}"`;
      }
      lines.push(line);
    }
    // Children are visited even when this node itself was filtered out.
    for (const child of node.children ?? []) {
      visit(child, depth + 1);
    }
  };
  visit(params.root, 0);
  // Post-pass: drop nth from refs whose role+name key never repeated.
  for (const [ref, data] of Object.entries(refs)) {
    const key = tracker.keysByRef.get(ref);
    if (key && !tracker.duplicates.has(key)) {
      delete data.nth;
    }
  }
  let snapshot = lines.join("\n");
  let truncated = false;
  const maxChars =
    typeof params.maxChars === "number" && Number.isFinite(params.maxChars) && params.maxChars > 0
      ? Math.floor(params.maxChars)
      : undefined;
  if (maxChars && snapshot.length > maxChars) {
    snapshot = `${snapshot.slice(0, maxChars)}\n\n[...TRUNCATED - page too large]`;
    truncated = true;
  }
  const stats = getRoleSnapshotStats(snapshot, refs);
  return truncated ? { snapshot, truncated, refs, stats } : { snapshot, refs, stats };
}

View File

@ -0,0 +1,108 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import {
listChromeMcpTabs,
openChromeMcpTab,
resetChromeMcpSessionsForTest,
setChromeMcpSessionFactoryForTest,
} from "./chrome-mcp.js";
// Shape of a single MCP tool invocation as received by the fake session.
type ToolCall = {
  name: string;
  arguments?: Record<string, unknown>;
};
// Derive the session/factory types from the test hook's signature so the
// fake stays in sync with chrome-mcp.ts without exporting its internals.
type ChromeMcpSessionFactory = Exclude<
  Parameters<typeof setChromeMcpSessionFactoryForTest>[0],
  null
>;
type ChromeMcpSession = Awaited<ReturnType<ChromeMcpSessionFactory>>;
// Fake MCP session whose callTool answers list_pages / new_page with
// text-only "## Pages" responses (no structuredContent), exercising the
// text-parsing fallback under test.
function createFakeSession(): ChromeMcpSession {
  const callTool = vi.fn(async ({ name }: ToolCall) => {
    if (name === "list_pages") {
      return {
        content: [
          {
            type: "text",
            text: [
              "## Pages",
              "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session [selected]",
              "2: https://github.com/openclaw/openclaw/pull/45318",
            ].join("\n"),
          },
        ],
      };
    }
    if (name === "new_page") {
      return {
        content: [
          {
            type: "text",
            text: [
              "## Pages",
              "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session",
              "2: https://github.com/openclaw/openclaw/pull/45318",
              "3: https://example.com/ [selected]",
            ].join("\n"),
          },
        ],
      };
    }
    throw new Error(`unexpected tool ${name}`);
  });
  return {
    client: {
      callTool,
      listTools: vi.fn().mockResolvedValue({ tools: [{ name: "list_pages" }] }),
      close: vi.fn().mockResolvedValue(undefined),
      connect: vi.fn().mockResolvedValue(undefined),
    },
    transport: {
      pid: 123,
    },
    ready: Promise.resolve(),
  } as unknown as ChromeMcpSession;
}
describe("chrome MCP page parsing", () => {
  // Each test gets a clean session cache so the injected factory is used.
  beforeEach(async () => {
    await resetChromeMcpSessionsForTest();
  });
  it("parses list_pages text responses when structuredContent is missing", async () => {
    const factory: ChromeMcpSessionFactory = async () => createFakeSession();
    setChromeMcpSessionFactoryForTest(factory);
    const tabs = await listChromeMcpTabs("chrome-live");
    expect(tabs).toEqual([
      {
        targetId: "1",
        title: "",
        url: "https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session",
        type: "page",
      },
      {
        targetId: "2",
        title: "",
        url: "https://github.com/openclaw/openclaw/pull/45318",
        type: "page",
      },
    ]);
  });
  it("parses new_page text responses and returns the created tab", async () => {
    const factory: ChromeMcpSessionFactory = async () => createFakeSession();
    setChromeMcpSessionFactoryForTest(factory);
    const tab = await openChromeMcpTab("chrome-live", "https://example.com/");
    expect(tab).toEqual({
      targetId: "3",
      title: "",
      url: "https://example.com/",
      type: "page",
    });
  });
});

488
src/browser/chrome-mcp.ts Normal file
View File

@ -0,0 +1,488 @@
import { randomUUID } from "node:crypto";
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import type { ChromeMcpSnapshotNode } from "./chrome-mcp.snapshot.js";
import type { BrowserTab } from "./client.js";
import { BrowserProfileUnavailableError, BrowserTabNotFoundError } from "./errors.js";
// Minimal page descriptor recovered from list_pages/new_page responses,
// either from structuredContent.pages or the numbered-text fallback.
type ChromeMcpStructuredPage = {
  id: number;
  url?: string;
  selected?: boolean;
};
// Loose shape of an MCP tool-call result; fields are optional because the
// server may return structured content, text blocks, or both.
type ChromeMcpToolResult = {
  structuredContent?: Record<string, unknown>;
  content?: Array<Record<string, unknown>>;
  isError?: boolean;
};
// One live connection to a chrome-devtools-mcp server process.
// `ready` resolves once the connect handshake and tool check complete.
type ChromeMcpSession = {
  client: Client;
  transport: StdioClientTransport;
  ready: Promise<void>;
};
// Injection point so tests can substitute a fake session (see the
// *ForTest exports at the bottom of this file).
type ChromeMcpSessionFactory = (profileName: string) => Promise<ChromeMcpSession>;
// Default launcher for the chrome-devtools-mcp server: run the latest
// published package via npx and auto-attach to an already-running Chrome.
const DEFAULT_CHROME_MCP_COMMAND = "npx";
const DEFAULT_CHROME_MCP_ARGS = [
  "-y",
  "chrome-devtools-mcp@latest",
  "--autoConnect",
  "--experimental-page-id-routing",
];
// One cached session per browser profile name.
const sessions = new Map<string, ChromeMcpSession>();
// Test seam: when non-null, getSession() uses this instead of spawning a
// real server process.
let sessionFactory: ChromeMcpSessionFactory | null = null;
/** Narrow an unknown value to a plain object record; null for anything else. */
function asRecord(value: unknown): Record<string, unknown> | null {
  if (typeof value !== "object" || value === null || Array.isArray(value)) {
    return null;
  }
  return value as Record<string, unknown>;
}
/**
 * Convert a structuredContent `pages` value into typed page entries.
 * Rows that are not objects or lack a numeric `id` are silently skipped.
 */
function asPages(value: unknown): ChromeMcpStructuredPage[] {
  if (!Array.isArray(value)) {
    return [];
  }
  const pages: ChromeMcpStructuredPage[] = [];
  for (const item of value) {
    if (typeof item !== "object" || item === null || Array.isArray(item)) {
      continue;
    }
    const record = item as Record<string, unknown>;
    if (typeof record.id !== "number") {
      continue;
    }
    pages.push({
      id: record.id,
      url: typeof record.url === "string" ? record.url : undefined,
      selected: record.selected === true,
    });
  }
  return pages;
}
/**
 * Translate a tab targetId (a stringified MCP page id) back to its number.
 * Throws BrowserTabNotFoundError when the string does not start with digits.
 */
function parsePageId(targetId: string): number {
  const id = Number.parseInt(targetId.trim(), 10);
  if (Number.isFinite(id)) {
    return id;
  }
  throw new BrowserTabNotFoundError();
}
/** Project MCP page entries onto the generic BrowserTab shape (no titles available). */
function toBrowserTabs(pages: ChromeMcpStructuredPage[]): BrowserTab[] {
  const tabs: BrowserTab[] = [];
  for (const page of pages) {
    tabs.push({
      targetId: String(page.id),
      title: "",
      url: page.url ?? "",
      type: "page",
    });
  }
  return tabs;
}
/** Return the result's structuredContent record, or an empty object when absent/malformed. */
function extractStructuredContent(result: ChromeMcpToolResult): Record<string, unknown> {
  const structured = result.structuredContent;
  if (structured && typeof structured === "object" && !Array.isArray(structured)) {
    return structured as Record<string, unknown>;
  }
  return {};
}
/** Collect the non-empty `text` fields from a result's content blocks. */
function extractTextContent(result: ChromeMcpToolResult): string[] {
  if (!Array.isArray(result.content)) {
    return [];
  }
  const texts: string[] = [];
  for (const entry of result.content) {
    if (!entry || typeof entry !== "object" || Array.isArray(entry)) {
      continue;
    }
    const text = (entry as Record<string, unknown>).text;
    if (typeof text === "string" && text) {
      texts.push(text);
    }
  }
  return texts;
}
/**
 * Parse the textual page listing emitted by list_pages/new_page, e.g.
 *   "1: https://example.com/ [selected]"
 * into structured page entries. Lines that do not match are ignored.
 */
function extractTextPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] {
  const lineRe = /^\s*(\d+):\s+(.+?)(?:\s+\[(selected)\])?\s*$/i;
  const pages: ChromeMcpStructuredPage[] = [];
  const blocks = Array.isArray(result.content) ? result.content : [];
  for (const entry of blocks) {
    if (!entry || typeof entry !== "object" || Array.isArray(entry)) {
      continue;
    }
    const text = (entry as Record<string, unknown>).text;
    if (typeof text !== "string" || !text) {
      continue;
    }
    for (const line of text.split(/\r?\n/)) {
      const match = lineRe.exec(line);
      if (!match) {
        continue;
      }
      pages.push({
        id: Number.parseInt(match[1] ?? "", 10),
        url: match[2]?.trim() || undefined,
        selected: Boolean(match[3]),
      });
    }
  }
  return pages;
}
/** Prefer structuredContent.pages; fall back to parsing the text listing. */
function extractStructuredPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] {
  const fromStructured = asPages(extractStructuredContent(result).pages);
  if (fromStructured.length > 0) {
    return fromStructured;
  }
  return extractTextPages(result);
}
/**
 * Pull the structured snapshot node out of a take_snapshot result.
 * Throws when the server returned no usable structured snapshot.
 */
function extractSnapshot(result: ChromeMcpToolResult): ChromeMcpSnapshotNode {
  const structured = result.structuredContent;
  const snapshot =
    structured && typeof structured === "object" && !Array.isArray(structured)
      ? (structured as Record<string, unknown>).snapshot
      : undefined;
  if (snapshot && typeof snapshot === "object" && !Array.isArray(snapshot)) {
    return snapshot as unknown as ChromeMcpSnapshotNode;
  }
  throw new Error("Chrome MCP snapshot response was missing structured snapshot data.");
}
/**
 * Parse JSON out of a fenced ```json block when present, otherwise out of
 * the whole string. An empty/whitespace-only payload yields null; invalid
 * JSON propagates the JSON.parse error.
 */
function extractJsonBlock(text: string): unknown {
  const fenced = /```json\s*([\s\S]*?)\s*```/i.exec(text);
  // An empty fenced body falls back to the full text, mirroring `||`.
  let payload = fenced?.[1]?.trim() ?? "";
  if (!payload) {
    payload = text.trim();
  }
  if (!payload) {
    return null;
  }
  return JSON.parse(payload);
}
// Spawn a real chrome-devtools-mcp server over stdio and attach it to the
// user's already-running Chrome (via --autoConnect in the default args).
async function createRealSession(profileName: string): Promise<ChromeMcpSession> {
  const transport = new StdioClientTransport({
    command: DEFAULT_CHROME_MCP_COMMAND,
    args: DEFAULT_CHROME_MCP_ARGS,
    stderr: "pipe",
  });
  const client = new Client(
    {
      name: "openclaw-browser",
      version: "0.0.0",
    },
    {},
  );
  // The handshake is kicked off eagerly but awaited later via `ready`, so
  // getSession() can cache the session before the connect completes.
  const ready = (async () => {
    try {
      await client.connect(transport);
      // Sanity-check that the server exposes the tool surface we rely on.
      const tools = await client.listTools();
      if (!tools.tools.some((tool) => tool.name === "list_pages")) {
        throw new Error("Chrome MCP server did not expose the expected navigation tools.");
      }
    } catch (err) {
      // Best-effort teardown before surfacing a profile-level error.
      await client.close().catch(() => {});
      throw new BrowserProfileUnavailableError(
        `Chrome MCP existing-session attach failed for profile "${profileName}". ` +
          `Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. ` +
          `Details: ${String(err)}`,
      );
    }
  })();
  return {
    client,
    transport,
    ready,
  };
}
// Return the cached session for a profile, creating one when missing.
// A cached session whose transport pid is null is treated as dead and
// replaced. Awaiting `ready` surfaces connect/handshake failures.
async function getSession(profileName: string): Promise<ChromeMcpSession> {
  let session = sessions.get(profileName);
  if (session && session.transport.pid === null) {
    // The server process is gone; drop the stale entry so we respawn below.
    sessions.delete(profileName);
    session = undefined;
  }
  if (!session) {
    // sessionFactory is the test seam; production uses createRealSession.
    session = await (sessionFactory ?? createRealSession)(profileName);
    sessions.set(profileName, session);
  }
  try {
    await session.ready;
    return session;
  } catch (err) {
    // Evict only if the map still holds *this* session — a concurrent
    // caller may already have replaced it with a fresh one.
    const current = sessions.get(profileName);
    if (current?.transport === session.transport) {
      sessions.delete(profileName);
    }
    throw err;
  }
}
// Invoke an MCP tool on the profile's session. Any failure is treated as
// fatal for the session: evict it from the cache, close the client
// best-effort, and rethrow so the caller sees the original error.
async function callTool(
  profileName: string,
  name: string,
  args: Record<string, unknown> = {},
): Promise<ChromeMcpToolResult> {
  const session = await getSession(profileName);
  try {
    return (await session.client.callTool({
      name,
      arguments: args,
    })) as ChromeMcpToolResult;
  } catch (err) {
    sessions.delete(profileName);
    await session.client.close().catch(() => {});
    throw err;
  }
}
/**
 * Run `fn` with a unique path inside a fresh temp directory, then remove
 * the directory (best-effort) regardless of how `fn` finished.
 */
async function withTempFile<T>(fn: (filePath: string) => Promise<T>): Promise<T> {
  const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-chrome-mcp-"));
  try {
    return await fn(path.join(tempDir, randomUUID()));
  } finally {
    try {
      await fs.rm(tempDir, { recursive: true, force: true });
    } catch {
      // Cleanup is best-effort; never mask the result of `fn`.
    }
  }
}
/** Look up a page by id in the current listing; throws when the tab is gone. */
async function findPageById(profileName: string, pageId: number): Promise<ChromeMcpStructuredPage> {
  for (const page of await listChromeMcpPages(profileName)) {
    if (page.id === pageId) {
      return page;
    }
  }
  throw new BrowserTabNotFoundError();
}
/** Establish (or reuse) the MCP session for a profile; throws if attach fails. */
export async function ensureChromeMcpAvailable(profileName: string): Promise<void> {
  await getSession(profileName);
}
export function getChromeMcpPid(profileName: string): number | null {
return sessions.get(profileName)?.transport.pid ?? null;
}
/**
 * Tear down the session for one profile. Returns false when no session was
 * tracked; close errors are swallowed since the entry is already evicted.
 */
export async function closeChromeMcpSession(profileName: string): Promise<boolean> {
  const session = sessions.get(profileName);
  if (session === undefined) {
    return false;
  }
  sessions.delete(profileName);
  try {
    await session.client.close();
  } catch {
    // Ignore shutdown errors; the cache entry is gone either way.
  }
  return true;
}
/**
 * Close every tracked MCP session.
 *
 * Sessions are independent, so they are shut down in parallel rather than
 * awaited one-by-one as before; each close is individually caught, so one
 * failing shutdown never blocks or aborts the rest.
 */
export async function stopAllChromeMcpSessions(): Promise<void> {
  const names = [...sessions.keys()];
  await Promise.all(names.map((name) => closeChromeMcpSession(name).catch(() => {})));
}
/** List the pages known to the attached Chrome via the list_pages tool. */
export async function listChromeMcpPages(profileName: string): Promise<ChromeMcpStructuredPage[]> {
  return extractStructuredPages(await callTool(profileName, "list_pages"));
}
/** Same listing as listChromeMcpPages, shaped as generic BrowserTab entries. */
export async function listChromeMcpTabs(profileName: string): Promise<BrowserTab[]> {
  const pages = await listChromeMcpPages(profileName);
  return toBrowserTabs(pages);
}
/**
 * Open a new page at `url` and return it as a BrowserTab. Prefers the page
 * the server marked selected; otherwise falls back to the last listed page.
 */
export async function openChromeMcpTab(profileName: string, url: string): Promise<BrowserTab> {
  const pages = extractStructuredPages(await callTool(profileName, "new_page", { url }));
  let created = pages.find((page) => page.selected);
  if (!created) {
    created = pages.at(-1);
  }
  if (!created) {
    throw new Error("Chrome MCP did not return the created page.");
  }
  return {
    targetId: String(created.id),
    title: "",
    url: created.url ?? url,
    type: "page",
  };
}
/** Select a page and bring its window to the front. */
export async function focusChromeMcpTab(profileName: string, targetId: string): Promise<void> {
  const pageId = parsePageId(targetId);
  await callTool(profileName, "select_page", { pageId, bringToFront: true });
}
/** Close a single page identified by its targetId. */
export async function closeChromeMcpTab(profileName: string, targetId: string): Promise<void> {
  const pageId = parsePageId(targetId);
  await callTool(profileName, "close_page", { pageId });
}
/**
 * Navigate an attached page to a URL and report the URL the page ended up
 * on (re-read from the page listing, falling back to the requested URL).
 *
 * @param params.timeoutMs optional navigation timeout forwarded to the tool.
 */
export async function navigateChromeMcpPage(params: {
  profileName: string;
  targetId: string;
  url: string;
  timeoutMs?: number;
}): Promise<{ url: string }> {
  // Parse once and reuse; the previous version parsed the same targetId twice.
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "navigate_page", {
    pageId,
    type: "url",
    url: params.url,
    ...(typeof params.timeoutMs === "number" ? { timeout: params.timeoutMs } : {}),
  });
  const page = await findPageById(params.profileName, pageId);
  return { url: page.url ?? params.url };
}
/** Capture the structured accessibility snapshot of a page. */
export async function takeChromeMcpSnapshot(params: {
  profileName: string;
  targetId: string;
}): Promise<ChromeMcpSnapshotNode> {
  const pageId = parsePageId(params.targetId);
  const result = await callTool(params.profileName, "take_snapshot", { pageId });
  return extractSnapshot(result);
}
/**
 * Screenshot a page (or one element when `uid` is set) by having the MCP
 * server write into a temp file, then return the file's bytes.
 */
export async function takeChromeMcpScreenshot(params: {
  profileName: string;
  targetId: string;
  uid?: string;
  fullPage?: boolean;
  format?: "png" | "jpeg";
}): Promise<Buffer> {
  return await withTempFile(async (filePath) => {
    const args: Record<string, unknown> = {
      pageId: parsePageId(params.targetId),
      filePath,
      format: params.format ?? "png",
    };
    if (params.uid) {
      args.uid = params.uid;
    }
    if (params.fullPage) {
      args.fullPage = true;
    }
    await callTool(params.profileName, "take_screenshot", args);
    return await fs.readFile(filePath);
  });
}
/** Click (or double-click) the element identified by a snapshot uid. */
export async function clickChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  uid: string;
  doubleClick?: boolean;
}): Promise<void> {
  const args: Record<string, unknown> = {
    pageId: parsePageId(params.targetId),
    uid: params.uid,
  };
  if (params.doubleClick) {
    args.dblClick = true;
  }
  await callTool(params.profileName, "click", args);
}
/** Type a value into a single input element identified by a snapshot uid. */
export async function fillChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  uid: string;
  value: string;
}): Promise<void> {
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "fill", {
    pageId,
    uid: params.uid,
    value: params.value,
  });
}
/** Fill several form fields in one round trip via the fill_form tool. */
export async function fillChromeMcpForm(params: {
  profileName: string;
  targetId: string;
  elements: Array<{ uid: string; value: string }>;
}): Promise<void> {
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "fill_form", {
    pageId,
    elements: params.elements,
  });
}
/** Move the pointer over the element identified by a snapshot uid. */
export async function hoverChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  uid: string;
}): Promise<void> {
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "hover", { pageId, uid: params.uid });
}
/** Drag one element onto another, both identified by snapshot uids. */
export async function dragChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  fromUid: string;
  toUid: string;
}): Promise<void> {
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "drag", {
    pageId,
    from_uid: params.fromUid,
    to_uid: params.toUid,
  });
}
/** Attach a local file to a file-input element identified by a snapshot uid. */
export async function uploadChromeMcpFile(params: {
  profileName: string;
  targetId: string;
  uid: string;
  filePath: string;
}): Promise<void> {
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "upload_file", {
    pageId,
    uid: params.uid,
    filePath: params.filePath,
  });
}
/** Send a keyboard key press to the page. */
export async function pressChromeMcpKey(params: {
  profileName: string;
  targetId: string;
  key: string;
}): Promise<void> {
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "press_key", { pageId, key: params.key });
}
/** Resize the page viewport to the given pixel dimensions. */
export async function resizeChromeMcpPage(params: {
  profileName: string;
  targetId: string;
  width: number;
  height: number;
}): Promise<void> {
  const pageId = parsePageId(params.targetId);
  await callTool(params.profileName, "resize_page", {
    pageId,
    width: params.width,
    height: params.height,
  });
}
/** Accept or dismiss a pending JS dialog, optionally supplying prompt text. */
export async function handleChromeMcpDialog(params: {
  profileName: string;
  targetId: string;
  action: "accept" | "dismiss";
  promptText?: string;
}): Promise<void> {
  const args: Record<string, unknown> = {
    pageId: parsePageId(params.targetId),
    action: params.action,
  };
  if (params.promptText) {
    args.promptText = params.promptText;
  }
  await callTool(params.profileName, "handle_dialog", args);
}
/**
 * Run a script in the page via evaluate_script and parse the JSON payload
 * out of the server's `message` field. Returns null when the message is
 * missing or blank.
 */
export async function evaluateChromeMcpScript(params: {
  profileName: string;
  targetId: string;
  fn: string;
  args?: string[];
}): Promise<unknown> {
  const toolArgs: Record<string, unknown> = {
    pageId: parsePageId(params.targetId),
    function: params.fn,
  };
  if (params.args?.length) {
    toolArgs.args = params.args;
  }
  const result = await callTool(params.profileName, "evaluate_script", toolArgs);
  const message = extractStructuredContent(result).message;
  if (typeof message !== "string" || !message.trim()) {
    return null;
  }
  return extractJsonBlock(message);
}
/** Block until the given text fragments appear on the page (or timeout). */
export async function waitForChromeMcpText(params: {
  profileName: string;
  targetId: string;
  text: string[];
  timeoutMs?: number;
}): Promise<void> {
  const args: Record<string, unknown> = {
    pageId: parsePageId(params.targetId),
    text: params.text,
  };
  if (typeof params.timeoutMs === "number") {
    args.timeout = params.timeoutMs;
  }
  await callTool(params.profileName, "wait_for", args);
}
/** Test seam: override how sessions are created; pass null to restore the real factory. */
export function setChromeMcpSessionFactoryForTest(factory: ChromeMcpSessionFactory | null): void {
  sessionFactory = factory;
}
/** Test helper: clear the factory override and tear down all cached sessions. */
export async function resetChromeMcpSessionsForTest(): Promise<void> {
  sessionFactory = null;
  await stopAllChromeMcpSessions();
}

View File

@ -3,6 +3,7 @@ import { fetchBrowserJson } from "./client-fetch.js";
export type BrowserStatus = {
enabled: boolean;
profile?: string;
driver?: "openclaw" | "extension" | "existing-session";
running: boolean;
cdpReady?: boolean;
cdpHttp?: boolean;
@ -26,6 +27,7 @@ export type ProfileStatus = {
cdpPort: number;
cdpUrl: string;
color: string;
driver: "openclaw" | "extension" | "existing-session";
running: boolean;
tabCount: number;
isDefault: boolean;
@ -165,7 +167,7 @@ export async function browserCreateProfile(
name: string;
color?: string;
cdpUrl?: string;
driver?: "openclaw" | "extension";
driver?: "openclaw" | "extension" | "existing-session";
},
): Promise<BrowserCreateProfileResult> {
return await fetchBrowserJson<BrowserCreateProfileResult>(

View File

@ -46,7 +46,7 @@ export type ResolvedBrowserProfile = {
cdpHost: string;
cdpIsLoopback: boolean;
color: string;
driver: "openclaw" | "extension";
driver: "openclaw" | "extension" | "existing-session";
attachOnly: boolean;
};
@ -335,7 +335,12 @@ export function resolveProfile(
let cdpHost = resolved.cdpHost;
let cdpPort = profile.cdpPort ?? 0;
let cdpUrl = "";
const driver = profile.driver === "extension" ? "extension" : "openclaw";
const driver =
profile.driver === "extension"
? "extension"
: profile.driver === "existing-session"
? "existing-session"
: "openclaw";
if (rawProfileUrl) {
const parsed = parseHttpUrl(rawProfileUrl, `browser.profiles.${profileName}.cdpUrl`);
@ -356,7 +361,7 @@ export function resolveProfile(
cdpIsLoopback: isLoopbackHost(cdpHost),
color: profile.color,
driver,
attachOnly: profile.attachOnly ?? resolved.attachOnly,
attachOnly: driver === "existing-session" ? true : (profile.attachOnly ?? resolved.attachOnly),
};
}

View File

@ -1,6 +1,10 @@
import type { ResolvedBrowserProfile } from "./config.js";
export type BrowserProfileMode = "local-managed" | "local-extension-relay" | "remote-cdp";
export type BrowserProfileMode =
| "local-managed"
| "local-extension-relay"
| "local-existing-session"
| "remote-cdp";
export type BrowserProfileCapabilities = {
mode: BrowserProfileMode;
@ -31,6 +35,20 @@ export function getBrowserProfileCapabilities(
};
}
if (profile.driver === "existing-session") {
return {
mode: "local-existing-session",
isRemote: false,
requiresRelay: false,
requiresAttachedTab: false,
usesPersistentPlaywright: false,
supportsPerTabWs: false,
supportsJsonTabEndpoints: false,
supportsReset: false,
supportsManagedTabLimit: false,
};
}
if (!profile.cdpIsLoopback) {
return {
mode: "remote-cdp",
@ -75,6 +93,9 @@ export function resolveDefaultSnapshotFormat(params: {
if (capabilities.mode === "local-extension-relay") {
return "aria";
}
if (capabilities.mode === "local-existing-session") {
return "ai";
}
return params.hasPlaywright ? "ai" : "aria";
}

View File

@ -1,6 +1,6 @@
import fs from "node:fs";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { resolveBrowserConfig } from "./config.js";
import { createBrowserProfilesService } from "./profiles-service.js";
import type { BrowserRouteContext, BrowserServerState } from "./server-context.js";
@ -57,6 +57,10 @@ async function createWorkProfileWithConfig(params: {
}
describe("BrowserProfilesService", () => {
beforeEach(() => {
vi.clearAllMocks();
});
it("allocates next local port for new profiles", async () => {
const { result, state } = await createWorkProfileWithConfig({
resolved: resolveBrowserConfig({}),
@ -163,6 +167,56 @@ describe("BrowserProfilesService", () => {
).rejects.toThrow(/requires an explicit loopback cdpUrl/i);
});
it("creates existing-session profiles as attach-only local entries", async () => {
const resolved = resolveBrowserConfig({});
const { ctx, state } = createCtx(resolved);
vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } });
const service = createBrowserProfilesService(ctx);
const result = await service.createProfile({
name: "chrome-live",
driver: "existing-session",
});
expect(result.cdpPort).toBe(18801);
expect(result.isRemote).toBe(false);
expect(state.resolved.profiles["chrome-live"]).toEqual({
cdpPort: 18801,
driver: "existing-session",
attachOnly: true,
color: expect.any(String),
});
expect(writeConfigFile).toHaveBeenCalledWith(
expect.objectContaining({
browser: expect.objectContaining({
profiles: expect.objectContaining({
"chrome-live": expect.objectContaining({
cdpPort: 18801,
driver: "existing-session",
attachOnly: true,
}),
}),
}),
}),
);
});
it("rejects driver=existing-session when cdpUrl is provided", async () => {
const resolved = resolveBrowserConfig({});
const { ctx } = createCtx(resolved);
vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } });
const service = createBrowserProfilesService(ctx);
await expect(
service.createProfile({
name: "chrome-live",
driver: "existing-session",
cdpUrl: "http://127.0.0.1:9222",
}),
).rejects.toThrow(/does not accept cdpUrl/i);
});
it("deletes remote profiles without stopping or removing local data", async () => {
const resolved = resolveBrowserConfig({
profiles: {
@ -218,4 +272,40 @@ describe("BrowserProfilesService", () => {
expect(result.deleted).toBe(true);
expect(movePathToTrash).toHaveBeenCalledWith(path.dirname(userDataDir));
});
it("deletes existing-session profiles without touching local browser data", async () => {
const resolved = resolveBrowserConfig({
profiles: {
"chrome-live": {
cdpPort: 18801,
color: "#0066CC",
driver: "existing-session",
attachOnly: true,
},
},
});
const { ctx } = createCtx(resolved);
vi.mocked(loadConfig).mockReturnValue({
browser: {
defaultProfile: "openclaw",
profiles: {
openclaw: { cdpPort: 18800, color: "#FF4500" },
"chrome-live": {
cdpPort: 18801,
color: "#0066CC",
driver: "existing-session",
attachOnly: true,
},
},
},
});
const service = createBrowserProfilesService(ctx);
const result = await service.deleteProfile("chrome-live");
expect(result.deleted).toBe(false);
expect(ctx.forProfile).not.toHaveBeenCalled();
expect(movePathToTrash).not.toHaveBeenCalled();
});
});

View File

@ -27,7 +27,7 @@ export type CreateProfileParams = {
name: string;
color?: string;
cdpUrl?: string;
driver?: "openclaw" | "extension";
driver?: "openclaw" | "extension" | "existing-session";
};
export type CreateProfileResult = {
@ -79,7 +79,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
const createProfile = async (params: CreateProfileParams): Promise<CreateProfileResult> => {
const name = params.name.trim();
const rawCdpUrl = params.cdpUrl?.trim() || undefined;
const driver = params.driver === "extension" ? "extension" : undefined;
const driver =
params.driver === "extension"
? "extension"
: params.driver === "existing-session"
? "existing-session"
: undefined;
if (!isValidProfileName(name)) {
throw new BrowserValidationError(
@ -118,6 +123,11 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
);
}
}
if (driver === "existing-session") {
throw new BrowserValidationError(
"driver=existing-session does not accept cdpUrl; it attaches via the Chrome MCP auto-connect flow",
);
}
profileConfig = {
cdpUrl: parsed.normalized,
...(driver ? { driver } : {}),
@ -136,6 +146,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
profileConfig = {
cdpPort,
...(driver ? { driver } : {}),
...(driver === "existing-session" ? { attachOnly: true } : {}),
color: profileColor,
};
}
@ -195,7 +206,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
const state = ctx.state();
const resolved = resolveProfile(state.resolved, name);
if (resolved?.cdpIsLoopback) {
if (resolved?.cdpIsLoopback && resolved.driver === "openclaw") {
try {
await ctx.forProfile(name).stopRunningBrowser();
} catch {

Some files were not shown because too many files have changed in this diff Show More