Merge branch 'main' into feat/vnc

This commit is contained in:
赵一寰 2026-03-14 10:32:57 +08:00
commit 8b49050cb2
981 changed files with 53366 additions and 29415 deletions

View File

@ -1,5 +1,11 @@
.git
.worktrees
# Sensitive files: docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN
# into the project root; keep it out of the build context.
.env
.env.*
.bun-cache
.bun
.tmp

View File

@ -49,7 +49,7 @@ runs:
exit 1
- name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
uses: actions/setup-node@v6
with:
node-version: ${{ inputs.node-version }}
check-latest: false
@ -63,7 +63,7 @@ runs:
- name: Setup Bun
if: inputs.install-bun == 'true'
uses: oven-sh/setup-bun@v2
uses: oven-sh/setup-bun@v2.1.3
with:
bun-version: "1.3.9"

View File

@ -61,14 +61,14 @@ runs:
- name: Restore pnpm store cache (exact key only)
# PRs that request sticky disks still need a safe cache restore path.
if: inputs.use-actions-cache == 'true' && (inputs.use-sticky-disk != 'true' || github.event_name == 'pull_request') && inputs.use-restore-keys != 'true'
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: ${{ steps.pnpm-store.outputs.path }}
key: ${{ runner.os }}-pnpm-store-${{ inputs.cache-key-suffix }}-${{ hashFiles('pnpm-lock.yaml') }}
- name: Restore pnpm store cache (with fallback keys)
if: inputs.use-actions-cache == 'true' && (inputs.use-sticky-disk != 'true' || github.event_name == 'pull_request') && inputs.use-restore-keys == 'true'
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: ${{ steps.pnpm-store.outputs.path }}
key: ${{ runner.os }}-pnpm-store-${{ inputs.cache-key-suffix }}-${{ hashFiles('pnpm-lock.yaml') }}

View File

@ -5,9 +5,12 @@ on:
types: [opened, edited, labeled]
issue_comment:
types: [created]
pull_request_target:
pull_request_target: # zizmor: ignore[dangerous-triggers] maintainer-owned label automation; no untrusted checkout or code execution
types: [labeled]
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
permissions: {}
jobs:
@ -17,20 +20,20 @@ jobs:
pull-requests: write
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token
continue-on-error: true
with:
app-id: "2729701"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token-fallback
if: steps.app-token.outcome == 'failure'
with:
app-id: "2971289"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
- name: Handle labeled items
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
script: |

View File

@ -7,7 +7,10 @@ on:
concurrency:
group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
cancel-in-progress: true
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
jobs:
# Detect docs-only changes to skip heavy jobs (test, build, Windows, macOS, Android).
@ -19,7 +22,7 @@ jobs:
docs_changed: ${{ steps.check.outputs.docs_changed }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 1
fetch-tags: false
@ -35,9 +38,8 @@ jobs:
id: check
uses: ./.github/actions/detect-docs-changes
# Detect which heavy areas are touched so PRs can skip unrelated expensive jobs.
# Push to main keeps broad coverage, but this job still needs to run so
# downstream jobs that list it in `needs` are not skipped.
# Detect which heavy areas are touched so CI can skip unrelated expensive jobs.
# Fail-safe: if detection fails, downstream jobs run.
changed-scope:
needs: [docs-scope]
if: needs.docs-scope.outputs.docs_only != 'true'
@ -50,7 +52,7 @@ jobs:
run_windows: ${{ steps.scope.outputs.run_windows }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 1
fetch-tags: false
@ -79,11 +81,11 @@ jobs:
# Build dist once for Node-relevant changes and share it with downstream jobs.
build-artifacts:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -98,13 +100,13 @@ jobs:
uses: ./.github/actions/setup-node-env
with:
install-bun: "false"
use-sticky-disk: "true"
use-sticky-disk: "false"
- name: Build dist
run: pnpm build
- name: Upload dist artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v7
with:
name: dist-build
path: dist/
@ -117,7 +119,7 @@ jobs:
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -125,10 +127,10 @@ jobs:
uses: ./.github/actions/setup-node-env
with:
install-bun: "false"
use-sticky-disk: "true"
use-sticky-disk: "false"
- name: Download dist artifact
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: dist-build
path: dist/
@ -138,7 +140,7 @@ jobs:
checks:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
strategy:
fail-fast: false
@ -146,6 +148,13 @@ jobs:
include:
- runtime: node
task: test
shard_index: 1
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node
task: test
shard_index: 2
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node
task: extensions
@ -157,44 +166,51 @@ jobs:
task: test
command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts
steps:
- name: Skip bun lane on push
if: github.event_name == 'push' && matrix.runtime == 'bun'
run: echo "Skipping bun test lane on push events."
- name: Skip bun lane on pull requests
if: github.event_name == 'pull_request' && matrix.runtime == 'bun'
run: echo "Skipping Bun compatibility lane on pull requests."
- name: Checkout
if: github.event_name != 'push' || matrix.runtime != 'bun'
uses: actions/checkout@v4
if: github.event_name != 'pull_request' || matrix.runtime != 'bun'
uses: actions/checkout@v6
with:
submodules: false
- name: Setup Node environment
if: matrix.runtime != 'bun' || github.event_name != 'push'
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
uses: ./.github/actions/setup-node-env
with:
install-bun: "${{ matrix.runtime == 'bun' }}"
use-sticky-disk: "true"
use-sticky-disk: "false"
- name: Configure Node test resources
if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
env:
SHARD_COUNT: ${{ matrix.shard_count || '' }}
SHARD_INDEX: ${{ matrix.shard_index || '' }}
run: |
# `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes.
# Default heap limits have been too low on Linux CI (V8 OOM near 4GB).
echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV"
if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then
echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV"
fi
- name: Run ${{ matrix.task }} (${{ matrix.runtime }})
if: matrix.runtime != 'bun' || github.event_name != 'push'
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
run: ${{ matrix.command }}
# Types, lint, and format check.
check:
name: "check"
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -202,7 +218,7 @@ jobs:
uses: ./.github/actions/setup-node-env
with:
install-bun: "false"
use-sticky-disk: "true"
use-sticky-disk: "false"
- name: Check types and lint and oxfmt
run: pnpm check
@ -220,7 +236,7 @@ jobs:
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -228,7 +244,7 @@ jobs:
uses: ./.github/actions/setup-node-env
with:
install-bun: "false"
use-sticky-disk: "true"
use-sticky-disk: "false"
- name: Check docs
run: pnpm check:docs
@ -236,11 +252,11 @@ jobs:
compat-node22:
name: "compat-node22"
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -250,7 +266,7 @@ jobs:
node-version: "22.x"
cache-key-suffix: "node22"
install-bun: "false"
use-sticky-disk: "true"
use-sticky-disk: "false"
- name: Configure Node 22 test resources
run: |
@ -269,16 +285,16 @@ jobs:
skills-python:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: "3.12"
@ -297,7 +313,7 @@ jobs:
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -316,7 +332,7 @@ jobs:
- name: Setup Python
id: setup-python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: "3.12"
cache: "pip"
@ -326,7 +342,7 @@ jobs:
.github/workflows/ci.yml
- name: Restore pre-commit cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: ~/.cache/pre-commit
key: pre-commit-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('.pre-commit-config.yaml') }}
@ -362,7 +378,7 @@ jobs:
checks-windows:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true'
runs-on: blacksmith-32vcpu-windows-2025
timeout-minutes: 45
env:
@ -409,7 +425,7 @@ jobs:
command: pnpm test
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -433,7 +449,7 @@ jobs:
}
- name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
uses: actions/setup-node@v6
with:
node-version: 24.x
check-latest: false
@ -495,7 +511,7 @@ jobs:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -531,7 +547,7 @@ jobs:
swiftformat --lint apps/macos/Sources --config .swiftformat
- name: Cache SwiftPM
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: ~/Library/Caches/org.swift.swiftpm
key: ${{ runner.os }}-swiftpm-${{ hashFiles('apps/macos/Package.resolved') }}
@ -567,7 +583,7 @@ jobs:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -724,7 +740,7 @@ jobs:
android:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
strategy:
fail-fast: false
@ -736,31 +752,45 @@ jobs:
command: ./gradlew --no-daemon :app:assembleDebug
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
- name: Setup Java
uses: actions/setup-java@v4
uses: actions/setup-java@v5
with:
distribution: temurin
# setup-android's sdkmanager currently crashes on JDK 21 in CI.
# Keep sdkmanager on the stable JDK path for Linux CI runners.
java-version: 17
- name: Setup Android SDK
uses: android-actions/setup-android@v3
with:
accept-android-sdk-licenses: false
- name: Setup Android SDK cmdline-tools
run: |
set -euo pipefail
ANDROID_SDK_ROOT="$HOME/.android-sdk"
CMDLINE_TOOLS_VERSION="12266719"
ARCHIVE="commandlinetools-linux-${CMDLINE_TOOLS_VERSION}_latest.zip"
URL="https://dl.google.com/android/repository/${ARCHIVE}"
mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools"
curl -fsSL "$URL" -o "/tmp/${ARCHIVE}"
rm -rf "$ANDROID_SDK_ROOT/cmdline-tools/latest"
unzip -q "/tmp/${ARCHIVE}" -d "$ANDROID_SDK_ROOT/cmdline-tools"
mv "$ANDROID_SDK_ROOT/cmdline-tools/cmdline-tools" "$ANDROID_SDK_ROOT/cmdline-tools/latest"
echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV"
echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV"
echo "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin" >> "$GITHUB_PATH"
echo "$ANDROID_SDK_ROOT/platform-tools" >> "$GITHUB_PATH"
- name: Setup Gradle
uses: gradle/actions/setup-gradle@v4
uses: gradle/actions/setup-gradle@v5
with:
gradle-version: 8.11.1
- name: Install Android SDK packages
run: |
yes | sdkmanager --licenses >/dev/null
sdkmanager --install \
yes | sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --licenses >/dev/null
sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --install \
"platform-tools" \
"platforms;android-36" \
"build-tools;36.0.0"

View File

@ -7,6 +7,9 @@ concurrency:
group: codeql-${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
permissions:
actions: read
contents: read
@ -67,7 +70,7 @@ jobs:
config_file: ""
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
@ -76,17 +79,17 @@ jobs:
uses: ./.github/actions/setup-node-env
with:
install-bun: "false"
use-sticky-disk: "true"
use-sticky-disk: "false"
- name: Setup Python
if: matrix.needs_python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: "3.12"
- name: Setup Java
if: matrix.needs_java
uses: actions/setup-java@v4
uses: actions/setup-java@v5
with:
distribution: temurin
java-version: "21"

View File

@ -18,6 +18,7 @@ concurrency:
cancel-in-progress: false
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
@ -33,13 +34,13 @@ jobs:
slim-digest: ${{ steps.build-slim.outputs.digest }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: Set up Docker Builder
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v4
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@v4
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.repository_owner }}
@ -134,13 +135,13 @@ jobs:
slim-digest: ${{ steps.build-slim.outputs.digest }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: Set up Docker Builder
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v4
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@v4
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.repository_owner }}
@ -233,10 +234,10 @@ jobs:
needs: [build-amd64, build-arm64]
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@v4
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.repository_owner }}

View File

@ -10,6 +10,9 @@ concurrency:
group: install-smoke-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
jobs:
docs-scope:
runs-on: blacksmith-16vcpu-ubuntu-2404
@ -17,7 +20,7 @@ jobs:
docs_only: ${{ steps.check.outputs.docs_only }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 1
fetch-tags: false
@ -38,10 +41,10 @@ jobs:
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout CLI
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: Set up Docker Builder
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v4
# Blacksmith can fall back to the local docker driver, which rejects gha
# cache export/import. Keep smoke builds driver-agnostic.

View File

@ -1,7 +1,7 @@
name: Labeler
on:
pull_request_target:
pull_request_target: # zizmor: ignore[dangerous-triggers] maintainer-owned triage workflow; no untrusted checkout or PR code execution
types: [opened, synchronize, reopened]
issues:
types: [opened]
@ -16,6 +16,9 @@ on:
required: false
default: "50"
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
permissions: {}
jobs:
@ -25,25 +28,25 @@ jobs:
pull-requests: write
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token
continue-on-error: true
with:
app-id: "2729701"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token-fallback
if: steps.app-token.outcome == 'failure'
with:
app-id: "2971289"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5
- uses: actions/labeler@v6
with:
configuration-path: .github/labeler.yml
repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
sync-labels: true
- name: Apply PR size label
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
script: |
@ -132,7 +135,7 @@ jobs:
labels: [targetSizeLabel],
});
- name: Apply maintainer or trusted-contributor label
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
script: |
@ -203,7 +206,7 @@ jobs:
// });
// }
- name: Apply too-many-prs label
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
script: |
@ -381,20 +384,20 @@ jobs:
pull-requests: write
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token
continue-on-error: true
with:
app-id: "2729701"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token-fallback
if: steps.app-token.outcome == 'failure'
with:
app-id: "2971289"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
- name: Backfill PR labels
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
script: |
@ -629,20 +632,20 @@ jobs:
issues: write
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token
continue-on-error: true
with:
app-id: "2729701"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token-fallback
if: steps.app-token.outcome == 'failure'
with:
app-id: "2971289"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
- name: Apply maintainer or trusted-contributor label
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
script: |

View File

@ -10,6 +10,7 @@ concurrency:
cancel-in-progress: false
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
NODE_VERSION: "24.x"
PNPM_VERSION: "10.23.0"
@ -22,7 +23,7 @@ jobs:
id-token: write
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 0

View File

@ -17,17 +17,20 @@ concurrency:
group: sandbox-common-smoke-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
jobs:
sandbox-common-smoke:
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
submodules: false
- name: Set up Docker Builder
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v4
- name: Build minimal sandbox base (USER sandbox)
shell: bash

View File

@ -5,6 +5,9 @@ on:
- cron: "17 3 * * *"
workflow_dispatch:
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
permissions: {}
jobs:
@ -14,13 +17,13 @@ jobs:
pull-requests: write
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token
continue-on-error: true
with:
app-id: "2729701"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token-fallback
continue-on-error: true
with:
@ -29,7 +32,7 @@ jobs:
- name: Mark stale issues and pull requests (primary)
id: stale-primary
continue-on-error: true
uses: actions/stale@v9
uses: actions/stale@v10
with:
repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
days-before-issue-stale: 7
@ -62,7 +65,7 @@ jobs:
- name: Check stale state cache
id: stale-state
if: always()
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token-fallback.outputs.token || steps.app-token.outputs.token }}
script: |
@ -85,7 +88,7 @@ jobs:
}
- name: Mark stale issues and pull requests (fallback)
if: (steps.stale-primary.outcome == 'failure' || steps.stale-state.outputs.has_state == 'true') && steps.app-token-fallback.outputs.token != ''
uses: actions/stale@v9
uses: actions/stale@v10
with:
repo-token: ${{ steps.app-token-fallback.outputs.token }}
days-before-issue-stale: 7
@ -121,13 +124,13 @@ jobs:
issues: write
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
- uses: actions/create-github-app-token@v2
id: app-token
with:
app-id: "2729701"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- name: Lock closed issues after 48h of no comments
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token }}
script: |

View File

@ -9,12 +9,15 @@ concurrency:
group: workflow-sanity-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
jobs:
no-tabs:
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: Fail on tabs in workflow files
run: |
@ -45,7 +48,7 @@ jobs:
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: Install actionlint
shell: bash

8
.gitignore vendored
View File

@ -1,6 +1,7 @@
node_modules
**/node_modules/
.env
docker-compose.override.yml
docker-compose.extra.yml
dist
pnpm-lock.yaml
@ -128,6 +129,7 @@ docs/superpowers/specs/2026-03-10-collapsed-side-nav-design.md
.gitignore
test/config-form.analyze.telegram.test.ts
ui/src/ui/theme-variants.browser.test.ts
ui/src/ui/__screenshots__/navigation.browser.test.ts/control-UI-routing-auto-scrolls-chat-history-to-the-latest-message-1.png
ui/src/ui/__screenshots__
ui/src/ui/views/__screenshots__
ui/.vitest-attachments
docs/superpowers

16
.jscpd.json Normal file
View File

@ -0,0 +1,16 @@
{
"gitignore": true,
"noSymlinks": true,
"ignore": [
"**/node_modules/**",
"**/dist/**",
"dist/**",
"**/.git/**",
"**/coverage/**",
"**/build/**",
"**/.build/**",
"**/.artifacts/**",
"docs/zh-CN/**",
"**/CHANGELOG.md"
]
}

View File

@ -132,6 +132,7 @@
- Framework: Vitest with V8 coverage thresholds (70% lines/branches/functions/statements).
- Naming: match source names with `*.test.ts`; e2e in `*.e2e.test.ts`.
- Run `pnpm test` (or `pnpm test:coverage`) before pushing when you touch logic.
- For targeted/local debugging, keep using the wrapper: `pnpm test -- <path-or-filter> [vitest args...]` (for example `pnpm test -- src/commands/onboard-search.test.ts -t "shows registered plugin providers"`); do not default to raw `pnpm vitest run ...` because it bypasses wrapper config/profile/pool routing.
- Do not set test workers above 16; tried already.
- If local Vitest runs cause memory pressure (common on non-Mac-Studio hosts), use `OPENCLAW_TEST_PROFILE=low OPENCLAW_TEST_SERIAL_GATEWAY=1 pnpm test` for land/gate runs.
- Live tests (real keys): `CLAWDBOT_LIVE_TEST=1 pnpm test:live` (OpenClaw-only) or `LIVE=1 pnpm test:live` (includes provider live tests). Docker: `pnpm test:docker:live-models`, `pnpm test:docker:live-gateway`. Onboarding Docker E2E: `pnpm test:docker:onboard`.
@ -201,6 +202,35 @@
## Agent-Specific Notes
- Vocabulary: "makeup" = "mac app".
- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested.
- Parallels macOS smoke playbook:
- `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`.
- Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed.
- Preferred automation entrypoint: `pnpm test:parallels:macos`. It restores the snapshot most closely matching `macOS 26.3.1 fresh`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
- Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero.
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-smoke.*`.
- Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`.
- For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green.
- Don't run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially.
- Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading.
- Parallels Windows smoke playbook:
- Preferred automation entrypoint: `pnpm test:parallels:windows`. It restores the snapshot most closely matching `pre-openclaw-native-e2e-2026-03-12`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
- Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero.
- Always use `prlctl exec --current-user` for Windows guest runs; plain `prlctl exec` lands in `NT AUTHORITY\SYSTEM` and does not match the real desktop-user install path.
- Prefer explicit `npm.cmd` / `openclaw.cmd`. Bare `npm` / `openclaw` in PowerShell can hit the `.ps1` shim and fail under restrictive execution policy.
- Use PowerShell only as the transport (`powershell.exe -NoProfile -ExecutionPolicy Bypass`) and call the `.cmd` shims explicitly from inside it.
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-windows.*`.
- Parallels Linux smoke playbook:
- Preferred automation entrypoint: `pnpm test:parallels:linux`. It restores the snapshot most closely matching `fresh` on `Ubuntu 24.04.3 ARM64`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
- Use plain `prlctl exec` on this snapshot. `--current-user` is not the right transport there.
- Fresh snapshot reality: `curl` is missing and `apt-get update` can fail on clock skew. Bootstrap with `apt-get -o Acquire::Check-Date=false update` and install `curl ca-certificates` before testing installer paths.
- Fresh `main` tgz smoke on Linux still needs the latest-release installer first, because this snapshot has no Node/npm before bootstrap. The harness does stable bootstrap first, then overlays current `main`.
- This snapshot does not have a usable `systemd --user` session. Treat managed daemon install as unsupported here; use `--skip-health`, then verify with direct `openclaw gateway run --bind loopback --port 18789 --force`.
- Env-backed auth refs are still fine, but any direct shell launch (`openclaw gateway run`, `openclaw agent --local`, Linux `gateway status --deep` against that direct run) must inherit the referenced env vars in the same shell.
- `prlctl exec` reaps detached Linux child processes on this snapshot, so a background `openclaw gateway run` launched from automation is not a trustworthy smoke path. The harness verifies installer + `agent --local`; do direct gateway checks only from an interactive guest shell when needed.
- When you do run Linux gateway checks manually from an interactive guest shell, use `openclaw gateway status --deep --require-rpc` so an RPC miss is a hard failure.
- Prefer direct argv guest commands for fetch/install steps (`curl`, `npm install -g`, `openclaw ...`) over nested `bash -lc` quoting; Linux guest quoting through Parallels was the flaky part.
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-linux.*`.
- Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`.
- When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`).
- Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`.

View File

@ -6,19 +6,59 @@ Docs: https://docs.openclaw.ai
### Changes
- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome's own setup guides.
- Browser/act automation: add batched actions, selector targeting, and delayed clicks for browser act requests with normalized batch dispatch. Thanks @vincentkoc.
- Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus.
- iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman.
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
### Fixes
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
- Dashboard/chat UI: stop reloading full chat history on every live tool result in dashboard v2 so tool-heavy runs no longer trigger UI freeze/re-render storms while the final event still refreshes persisted history. (#45541) Thanks @BunsDev.
- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang.
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata.
- macOS/exec approvals: respect per-agent exec approval settings in the gateway prompter, including allowlist fallback when the native prompt cannot be shown, so gateway-triggered `system.run` requests follow configured policy instead of always prompting or denying unexpectedly. (#13707) Thanks @sliekens.
- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus.
- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv.
- Telegram/inbound media IPv4 fallback: retry SSRF-guarded Telegram file downloads once with the same IPv4 fallback policy as Bot API calls so fresh installs on IPv6-broken hosts no longer fail to download inbound images.
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart.
- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`.
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding.
- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman.
- Gateway/session reset: preserve `lastAccountId` and `lastThreadId` across gateway session resets so replies keep routing back to the same account and thread after `/reset`. (#44773) Thanks @Lanfei.
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss.
- iMessage/remote attachments: reject unsafe remote attachment paths before spawning SCP, so sender-controlled filenames can no longer inject shell metacharacters into remote media staging. Thanks @lintsinghua.
- Telegram/webhook auth: validate the Telegram webhook secret before reading or parsing request bodies, so unauthenticated requests are rejected immediately instead of consuming up to 1 MB first. Thanks @space08.
- Security/device pairing: make bootstrap setup codes single-use so pending device pairing requests cannot be silently replayed and widened to admin before approval. Thanks @tdjackey.
- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization.
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants.
- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token.
- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks.
- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins.
- Gateway/status: add `openclaw gateway status --require-rpc` and clearer Linux non-interactive daemon-install failure reporting so automation can fail hard on probe misses instead of treating a printed RPC error as green.
- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han.
- Build/plugin-sdk bundling: bundle plugin-sdk subpath entries in one shared build pass so published packages stop duplicating shared chunks and avoid the recent plugin-sdk memory blow-up. (#45426) Thanks @TarasShyn.
- Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc.
- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference.
- Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97.
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv.
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
- Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179.
- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec.
- Signal/config validation: add `channels.signal.groups` schema support so per-group `requireMention`, `tools`, and `toolsBySender` overrides no longer get rejected during config validation. (#27199) Thanks @unisone.
- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
- Telegram/media errors: redact Telegram file URLs before building media fetch errors so failed inbound downloads do not leak bot tokens into logs. Thanks @space08.
- Dashboard/chat UI: render oversized plain-text replies as normal paragraphs instead of capped gray code blocks, so long desktop chat responses stay readable without tab-switching refreshes.
- Gateway/Control UI: restore the operator-only device-auth bypass and classify browser connect failures so origin and device-identity problems no longer show up as auth errors in the Control UI and web chat. (#45512) thanks @sallyom.
- macOS/voice wake: stop crashing wake-word command extraction when speech segment ranges come from a different transcript instance.
- Discord/allowlists: honor raw `guild_id` when hydrated guild objects are missing so allowlisted channels and threads like `#maintainers` no longer get false-dropped before channel allowlist checks.
## 2026.3.12
@ -31,6 +71,7 @@ Docs: https://docs.openclaw.ai
- Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi
- Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff
- Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc.
- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc.
### Fixes
@ -87,6 +128,7 @@ Docs: https://docs.openclaw.ai
- Gateway/session stores: regenerate the Swift push-test protocol models and align Windows native session-store realpath handling so protocol checks and sync session discovery stop drifting on Windows. (#44266) thanks @jalehman.
- Context engine/session routing: forward optional `sessionKey` through context-engine lifecycle calls so plugins can see structured routing metadata during bootstrap, assembly, post-turn ingestion, and compaction. (#44157) thanks @jalehman.
- Agents/failover: classify z.ai `network_error` stop reasons as retryable timeouts so provider connectivity failures trigger fallback instead of surfacing raw unhandled-stop-reason errors. (#43884) Thanks @hougangdev.
- Config/Anthropic startup: inline Anthropic alias normalization during config load so gateway startup no longer crashes on dated Anthropic model refs like `anthropic/claude-sonnet-4-20250514`. (#45520) Thanks @BunsDev.
- Memory/session sync: add mode-aware post-compaction session reindexing with `agents.defaults.compaction.postIndexSync` plus `agents.defaults.memorySearch.sync.sessions.postCompactionForce`, so compacted session memory can refresh immediately without forcing every deployment into synchronous reindexing. (#25561) thanks @rodrigouroz.
- Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb.
- Telegram/native command sync: suppress expected `BOT_COMMANDS_TOO_MUCH` retry error noise, add a final fallback summary log, and document the difference between command-menu overflow and real Telegram network failures.
@ -104,6 +146,8 @@ Docs: https://docs.openclaw.ai
- Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn.
- CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621.
- Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte.
- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh.
- Feishu/file uploads: preserve literal UTF-8 filenames in `im.file.create` so Chinese and other non-ASCII filenames no longer appear percent-encoded in chat. (#34262) Thanks @fabiaodemianyang and @KangShuaiFu.
## 2026.3.11
@ -244,6 +288,7 @@ Docs: https://docs.openclaw.ai
- Agents/failover: classify ZenMux quota-refresh `402` responses as `rate_limit` so model fallback retries continue instead of stopping on a temporary subscription window. (#43917) thanks @bwjoke.
- Agents/failover: classify HTTP 422 malformed-request responses as `format` and recognize OpenRouter "requires more credits" billing errors so provider fallback triggers instead of surfacing raw errors. (#43823) thanks @jnMetaCode.
- Memory/QMD Windows: fail closed when `qmd.cmd` or `mcporter.cmd` wrappers cannot be resolved to a direct entrypoint, so memory search no longer falls back to shell execution on Windows.
- macOS/remote gateway: stop PortGuardian from killing Docker Desktop and other external listeners on the gateway port in remote mode, so containerized and tunneled gateway setups no longer lose their port-forward owner on app startup. (#6755) Thanks @teslamint.
## 2026.3.8
@ -322,6 +367,7 @@ Docs: https://docs.openclaw.ai
- Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb.
- Agents/embedded runner: carry provider-observed overflow token counts into compaction so overflow retries and diagnostics use the rejected live prompt size instead of only transcript estimates. (#40357) thanks @rabsef-bicrym.
- Agents/compaction transcript updates: emit a transcript-update event immediately after successful embedded compaction so downstream listeners observe the post-compact transcript without waiting for a later write. (#25558) thanks @rodrigouroz.
- Agents/sessions_spawn: use the target agent workspace for cross-agent spawned runs instead of inheriting the caller workspace, so child sessions load the correct workspace-scoped instructions and persona files. (#40176) Thanks @moshehbenavraham.
## 2026.3.7
@ -3246,7 +3292,7 @@ Docs: https://docs.openclaw.ai
- Agents: add CLI log hint to "agent failed before reply" messages. (#1550) Thanks @sweepies.
- Agents: warn and ignore tool allowlists that only reference unknown or unloaded plugin tools. (#1566)
- Agents: treat plugin-only tool allowlists as opt-ins; keep core tools enabled. (#1467)
- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (commit 084002998)
- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (#45459) Thanks @LyttonFeng and @vincentkoc.
- Slack: honor open groupPolicy for unlisted channels in message + slash gating. (#1563) Thanks @itsjaydesu.
- Discord: limit autoThread mention bypass to bot-owned threads; keep ack reactions mention-gated. (#1511) Thanks @pvoo.
- Discord: retry rate-limited allowlist resolution + command deploy to avoid gateway crashes. (commit f70ac0c7c)

View File

@ -132,6 +132,7 @@ WORKDIR /app
RUN --mount=type=cache,id=openclaw-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
procps hostname curl git openssl

View File

@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get upgrade -y --no-install-recommends \
&& apt-get install -y --no-install-recommends \
bash \
ca-certificates \

View File

@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get upgrade -y --no-install-recommends \
&& apt-get install -y --no-install-recommends \
bash \
ca-certificates \

View File

@ -24,6 +24,7 @@ ENV PATH=${BUN_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/sbin
RUN --mount=type=cache,id=openclaw-sandbox-common-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-sandbox-common-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get upgrade -y --no-install-recommends \
&& apt-get install -y --no-install-recommends ${PACKAGES}
RUN if [ "${INSTALL_PNPM}" = "1" ]; then npm install -g pnpm; fi

View File

@ -101,25 +101,19 @@ public enum WakeWordGate {
}
public static func commandText(
transcript: String,
transcript _: String,
segments: [WakeWordSegment],
triggerEndTime: TimeInterval)
-> String {
let threshold = triggerEndTime + 0.001
var commandWords: [String] = []
commandWords.reserveCapacity(segments.count)
for segment in segments where segment.start >= threshold {
if normalizeToken(segment.text).isEmpty { continue }
if let range = segment.range {
let slice = transcript[range.lowerBound...]
return String(slice).trimmingCharacters(in: Self.whitespaceAndPunctuation)
}
break
let normalized = normalizeToken(segment.text)
if normalized.isEmpty { continue }
commandWords.append(segment.text)
}
let text = segments
.filter { $0.start >= threshold && !normalizeToken($0.text).isEmpty }
.map(\.text)
.joined(separator: " ")
return text.trimmingCharacters(in: Self.whitespaceAndPunctuation)
return commandWords.joined(separator: " ").trimmingCharacters(in: Self.whitespaceAndPunctuation)
}
public static func matchesTextOnly(text: String, triggers: [String]) -> Bool {

View File

@ -46,6 +46,25 @@ import Testing
let match = WakeWordGate.match(transcript: transcript, segments: segments, config: config)
#expect(match?.command == "do it")
}
// Regression test: a segment may carry a `range` built over a *different*
// String instance ("foreign" indices). Applying such a range to `transcript`
// would be invalid, so command extraction must rely on segment text/timing
// only — the two post-trigger segments ("do" at 0.9s, "thing" at 1.1s, both
// after the 0.3s trigger end) should join into "do thing".
@Test func commandTextHandlesForeignRangeIndices() {
let transcript = "hey clawd do thing"
let other = "do thing"
// Range indices derived from `other`, not from `transcript`.
let foreignRange = other.range(of: "do")
let segments = [
WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")),
WakeWordSegment(text: "clawd", start: 0.2, duration: 0.1, range: transcript.range(of: "clawd")),
WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange),
WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil),
]
// Only segments starting after triggerEndTime contribute to the command.
let command = WakeWordGate.commandText(
transcript: transcript,
segments: segments,
triggerEndTime: 0.3)
#expect(command == "do thing")
}
}
private func makeSegments(

View File

@ -194,7 +194,7 @@ dependencies {
implementation("androidx.camera:camera-lifecycle:1.5.2")
implementation("androidx.camera:camera-video:1.5.2")
implementation("androidx.camera:camera-view:1.5.2")
implementation("com.journeyapps:zxing-android-embedded:4.3.0")
implementation("com.google.android.gms:play-services-code-scanner:16.1.0")
// Unicast DNS-SD (Wide-Area Bonjour) for tailnet discovery domains.
implementation("dnsjava:dnsjava:3.6.4")

View File

@ -96,8 +96,9 @@ import ai.openclaw.app.LocationMode
import ai.openclaw.app.MainViewModel
import ai.openclaw.app.R
import ai.openclaw.app.node.DeviceNotificationListenerService
import com.journeyapps.barcodescanner.ScanContract
import com.journeyapps.barcodescanner.ScanOptions
import com.google.mlkit.vision.barcode.common.Barcode
import com.google.mlkit.vision.codescanner.GmsBarcodeScannerOptions
import com.google.mlkit.vision.codescanner.GmsBarcodeScanning
private enum class OnboardingStep(val index: Int, val label: String) {
Welcome(1, "Welcome"),
@ -241,6 +242,13 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) {
var attemptedConnect by rememberSaveable { mutableStateOf(false) }
val lifecycleOwner = LocalLifecycleOwner.current
val qrScannerOptions =
remember {
GmsBarcodeScannerOptions.Builder()
.setBarcodeFormats(Barcode.FORMAT_QR_CODE)
.build()
}
val qrScanner = remember(context, qrScannerOptions) { GmsBarcodeScanning.getClient(context, qrScannerOptions) }
val smsAvailable =
remember(context) {
@ -460,23 +468,6 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) {
onDispose { lifecycleOwner.lifecycle.removeObserver(observer) }
}
val qrScanLauncher =
rememberLauncherForActivityResult(ScanContract()) { result ->
val contents = result.contents?.trim().orEmpty()
if (contents.isEmpty()) {
return@rememberLauncherForActivityResult
}
val scannedSetupCode = resolveScannedSetupCode(contents)
if (scannedSetupCode == null) {
gatewayError = "QR code did not contain a valid setup code."
return@rememberLauncherForActivityResult
}
setupCode = scannedSetupCode
gatewayInputMode = GatewayInputMode.SetupCode
gatewayError = null
attemptedConnect = false
}
if (pendingTrust != null) {
val prompt = pendingTrust!!
AlertDialog(
@ -552,14 +543,28 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) {
gatewayError = gatewayError,
onScanQrClick = {
gatewayError = null
qrScanLauncher.launch(
ScanOptions().apply {
setDesiredBarcodeFormats(ScanOptions.QR_CODE)
setPrompt("Scan OpenClaw onboarding QR")
setBeepEnabled(false)
setOrientationLocked(false)
},
)
qrScanner.startScan()
.addOnSuccessListener { barcode ->
val contents = barcode.rawValue?.trim().orEmpty()
if (contents.isEmpty()) {
return@addOnSuccessListener
}
val scannedSetupCode = resolveScannedSetupCode(contents)
if (scannedSetupCode == null) {
gatewayError = "QR code did not contain a valid setup code."
return@addOnSuccessListener
}
setupCode = scannedSetupCode
gatewayInputMode = GatewayInputMode.SetupCode
gatewayError = null
attemptedConnect = false
}
.addOnCanceledListener {
// User dismissed the scanner; preserve current form state.
}
.addOnFailureListener {
gatewayError = qrScannerErrorMessage()
}
},
onAdvancedOpenChange = { gatewayAdvancedOpen = it },
onInputModeChange = {
@ -1785,6 +1790,10 @@ private fun isPermissionGranted(context: Context, permission: String): Boolean {
return ContextCompat.checkSelfPermission(context, permission) == PackageManager.PERMISSION_GRANTED
}
/**
 * User-facing error shown when the Google Code Scanner fails to launch,
 * pointing the user at Play services updates or the manual setup-code path.
 */
private fun qrScannerErrorMessage(): String =
    "Google Code Scanner could not start. Update Google Play services or use the setup code manually."
/** Whether this app currently has notification-listener access, per the device service. */
private fun isNotificationListenerEnabled(context: Context): Boolean =
    DeviceNotificationListenerService.isAccessEnabled(context)

View File

@ -189,6 +189,7 @@ final class ShareViewController: UIViewController {
try await gateway.connect(
url: url,
token: config.token,
bootstrapToken: nil,
password: config.password,
connectOptions: makeOptions("openclaw-ios"),
sessionBox: nil,
@ -208,6 +209,7 @@ final class ShareViewController: UIViewController {
try await gateway.connect(
url: url,
token: config.token,
bootstrapToken: nil,
password: config.password,
connectOptions: makeOptions("moltbot-ios"),
sessionBox: nil,

View File

@ -19,6 +19,7 @@ enum OnboardingConnectionMode: String, CaseIterable {
enum OnboardingStateStore {
private static let completedDefaultsKey = "onboarding.completed"
private static let firstRunIntroSeenDefaultsKey = "onboarding.first_run_intro_seen"
private static let lastModeDefaultsKey = "onboarding.last_mode"
private static let lastSuccessTimeDefaultsKey = "onboarding.last_success_time"
@ -39,10 +40,23 @@ enum OnboardingStateStore {
defaults.set(Int(Date().timeIntervalSince1970), forKey: Self.lastSuccessTimeDefaultsKey)
}
/// Whether the one-time first-run intro pager still needs to be presented.
/// True until `markFirstRunIntroSeen` records the flag in `defaults`.
static func shouldPresentFirstRunIntro(defaults: UserDefaults = .standard) -> Bool {
    let introSeen = defaults.bool(forKey: Self.firstRunIntroSeenDefaultsKey)
    return introSeen == false
}
/// Record that the first-run intro has been shown so
/// `shouldPresentFirstRunIntro` returns false on later launches.
static func markFirstRunIntroSeen(defaults: UserDefaults = .standard) {
defaults.set(true, forKey: Self.firstRunIntroSeenDefaultsKey)
}
/// Clear only the onboarding-completed flag; the first-run intro flag is
/// left untouched (contrast with `reset`, which clears both).
static func markIncomplete(defaults: UserDefaults = .standard) {
defaults.set(false, forKey: Self.completedDefaultsKey)
}
/// Fully reset onboarding state: clears both the completed flag and the
/// first-run intro flag, so the whole flow (intro included) runs again.
static func reset(defaults: UserDefaults = .standard) {
    let keys = [Self.completedDefaultsKey, Self.firstRunIntroSeenDefaultsKey]
    for key in keys {
        defaults.set(false, forKey: key)
    }
}
static func lastMode(defaults: UserDefaults = .standard) -> OnboardingConnectionMode? {
let raw = defaults.string(forKey: Self.lastModeDefaultsKey)?
.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""

View File

@ -6,6 +6,7 @@ import SwiftUI
import UIKit
private enum OnboardingStep: Int, CaseIterable {
case intro
case welcome
case mode
case connect
@ -29,7 +30,8 @@ private enum OnboardingStep: Int, CaseIterable {
var title: String {
switch self {
case .welcome: "Welcome"
case .intro: "Welcome"
case .welcome: "Connect Gateway"
case .mode: "Connection Mode"
case .connect: "Connect"
case .auth: "Authentication"
@ -38,7 +40,7 @@ private enum OnboardingStep: Int, CaseIterable {
}
var canGoBack: Bool {
self != .welcome && self != .success
self != .intro && self != .welcome && self != .success
}
}
@ -49,7 +51,7 @@ struct OnboardingWizardView: View {
@AppStorage("node.instanceId") private var instanceId: String = UUID().uuidString
@AppStorage("gateway.discovery.domain") private var discoveryDomain: String = ""
@AppStorage("onboarding.developerMode") private var developerModeEnabled: Bool = false
@State private var step: OnboardingStep = .welcome
@State private var step: OnboardingStep
@State private var selectedMode: OnboardingConnectionMode?
@State private var manualHost: String = ""
@State private var manualPort: Int = 18789
@ -58,11 +60,10 @@ struct OnboardingWizardView: View {
@State private var gatewayToken: String = ""
@State private var gatewayPassword: String = ""
@State private var connectMessage: String?
@State private var statusLine: String = "Scan the QR code from your gateway to connect."
@State private var statusLine: String = "In your OpenClaw chat, run /pair qr, then scan the code here."
@State private var connectingGatewayID: String?
@State private var issue: GatewayConnectionIssue = .none
@State private var didMarkCompleted = false
@State private var didAutoPresentQR = false
@State private var pairingRequestId: String?
@State private var discoveryRestartTask: Task<Void, Never>?
@State private var showQRScanner: Bool = false
@ -74,14 +75,23 @@ struct OnboardingWizardView: View {
let allowSkip: Bool
let onClose: () -> Void
init(allowSkip: Bool, onClose: @escaping () -> Void) {
self.allowSkip = allowSkip
self.onClose = onClose
_step = State(
initialValue: OnboardingStateStore.shouldPresentFirstRunIntro() ? .intro : .welcome)
}
private var isFullScreenStep: Bool {
self.step == .welcome || self.step == .success
self.step == .intro || self.step == .welcome || self.step == .success
}
var body: some View {
NavigationStack {
Group {
switch self.step {
case .intro:
self.introStep
case .welcome:
self.welcomeStep
case .success:
@ -293,6 +303,83 @@ struct OnboardingWizardView: View {
}
}
// First-run intro pager shown before the gateway-connect step (gated by
// OnboardingStateStore.shouldPresentFirstRunIntro in init). Layout: app icon
// and title, a capability-summary card, a security-notice card, and a
// Continue button that advances via advanceFromIntro().
@ViewBuilder
private var introStep: some View {
VStack(spacing: 0) {
Spacer()
Image(systemName: "iphone.gen3")
.font(.system(size: 60, weight: .semibold))
.foregroundStyle(.tint)
.padding(.bottom, 18)
Text("Welcome to OpenClaw")
.font(.largeTitle.weight(.bold))
.multilineTextAlignment(.center)
.padding(.bottom, 10)
Text("Turn this iPhone into a secure OpenClaw node for chat, voice, camera, and device tools.")
.font(.subheadline)
.foregroundStyle(.secondary)
.multilineTextAlignment(.center)
.padding(.horizontal, 32)
.padding(.bottom, 24)
// Card: what the user will do during onboarding.
VStack(alignment: .leading, spacing: 14) {
Label("Connect to your gateway", systemImage: "link")
Label("Choose device permissions", systemImage: "hand.raised")
Label("Use OpenClaw from your phone", systemImage: "message.fill")
}
.font(.subheadline.weight(.semibold))
.frame(maxWidth: .infinity, alignment: .leading)
.padding(18)
.background {
RoundedRectangle(cornerRadius: 20, style: .continuous)
.fill(Color(uiColor: .secondarySystemBackground))
}
.padding(.horizontal, 24)
.padding(.bottom, 16)
// Card: security notice — the connected agent can use any device
// capabilities the user later enables.
HStack(alignment: .top, spacing: 12) {
Image(systemName: "exclamationmark.triangle.fill")
.font(.title3.weight(.semibold))
.foregroundStyle(.orange)
.frame(width: 24)
.padding(.top, 2)
VStack(alignment: .leading, spacing: 6) {
Text("Security notice")
.font(.headline)
Text(
"The connected OpenClaw agent can use device capabilities you enable, such as camera, microphone, photos, contacts, calendar, and location. Continue only if you trust the gateway and agent you connect to.")
.font(.footnote)
.foregroundStyle(.secondary)
.fixedSize(horizontal: false, vertical: true)
}
}
.frame(maxWidth: .infinity, alignment: .leading)
.padding(18)
.background {
RoundedRectangle(cornerRadius: 20, style: .continuous)
.fill(Color(uiColor: .secondarySystemBackground))
}
.padding(.horizontal, 24)
Spacer()
// Marks the intro as seen and advances to the gateway-connect step.
Button {
self.advanceFromIntro()
} label: {
Text("Continue")
.frame(maxWidth: .infinity)
}
.buttonStyle(.borderedProminent)
.controlSize(.large)
.padding(.horizontal, 24)
.padding(.bottom, 48)
}
}
@ViewBuilder
private var welcomeStep: some View {
VStack(spacing: 0) {
@ -303,16 +390,37 @@ struct OnboardingWizardView: View {
.foregroundStyle(.tint)
.padding(.bottom, 20)
Text("Welcome")
Text("Connect Gateway")
.font(.largeTitle.weight(.bold))
.padding(.bottom, 8)
Text("Connect to your OpenClaw gateway")
Text("Scan a QR code from your OpenClaw gateway or continue with manual setup.")
.font(.subheadline)
.foregroundStyle(.secondary)
.multilineTextAlignment(.center)
.padding(.horizontal, 32)
VStack(alignment: .leading, spacing: 8) {
Text("How to pair")
.font(.headline)
Text("In your OpenClaw chat, run")
.font(.footnote)
.foregroundStyle(.secondary)
Text("/pair qr")
.font(.system(.footnote, design: .monospaced).weight(.semibold))
Text("Then scan the QR code here to connect this iPhone.")
.font(.footnote)
.foregroundStyle(.secondary)
}
.frame(maxWidth: .infinity, alignment: .leading)
.padding(16)
.background {
RoundedRectangle(cornerRadius: 18, style: .continuous)
.fill(Color(uiColor: .secondarySystemBackground))
}
.padding(.horizontal, 24)
.padding(.top, 20)
Spacer()
VStack(spacing: 12) {
@ -342,8 +450,7 @@ struct OnboardingWizardView: View {
.foregroundStyle(.secondary)
.multilineTextAlignment(.center)
.padding(.horizontal, 24)
.padding(.horizontal, 24)
.padding(.bottom, 48)
.padding(.bottom, 48)
}
}
@ -727,6 +834,12 @@ struct OnboardingWizardView: View {
return nil
}
private func advanceFromIntro() {
OnboardingStateStore.markFirstRunIntroSeen()
self.statusLine = "In your OpenClaw chat, run /pair qr, then scan the code here."
self.step = .welcome
}
private func navigateBack() {
guard let target = self.step.previous else { return }
self.connectingGatewayID = nil
@ -775,10 +888,8 @@ struct OnboardingWizardView: View {
let hasSavedGateway = GatewaySettingsStore.loadLastGatewayConnection() != nil
let hasToken = !self.gatewayToken.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
let hasPassword = !self.gatewayPassword.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
if !self.didAutoPresentQR, !hasSavedGateway, !hasToken, !hasPassword {
self.didAutoPresentQR = true
self.statusLine = "No saved pairing found. Scan QR code to connect."
self.showQRScanner = true
if !hasSavedGateway, !hasToken, !hasPassword {
self.statusLine = "No saved pairing found. In your OpenClaw chat, run /pair qr, then scan the code here."
}
}

View File

@ -1008,6 +1008,7 @@ struct SettingsTab: View {
// Reset onboarding state + clear saved gateway connection (the two things RootCanvas checks).
GatewaySettingsStore.clearLastGatewayConnection()
OnboardingStateStore.reset()
// RootCanvas also short-circuits onboarding when these are true.
self.onboardingComplete = false

View File

@ -39,6 +39,35 @@ import Testing
#expect(OnboardingStateStore.shouldPresentOnLaunch(appModel: appModel, defaults: defaults))
}
@Test func firstRunIntroDefaultsToVisibleThenPersists() {
let testDefaults = self.makeDefaults()
let defaults = testDefaults.defaults
defer { self.reset(testDefaults) }
#expect(OnboardingStateStore.shouldPresentFirstRunIntro(defaults: defaults))
OnboardingStateStore.markFirstRunIntroSeen(defaults: defaults)
#expect(!OnboardingStateStore.shouldPresentFirstRunIntro(defaults: defaults))
}
@Test @MainActor func resetClearsCompletionAndIntroSeen() {
let testDefaults = self.makeDefaults()
let defaults = testDefaults.defaults
defer { self.reset(testDefaults) }
OnboardingStateStore.markCompleted(mode: .homeNetwork, defaults: defaults)
OnboardingStateStore.markFirstRunIntroSeen(defaults: defaults)
OnboardingStateStore.reset(defaults: defaults)
let appModel = NodeAppModel()
appModel.gatewayServerName = nil
#expect(OnboardingStateStore.shouldPresentOnLaunch(appModel: appModel, defaults: defaults))
#expect(OnboardingStateStore.shouldPresentFirstRunIntro(defaults: defaults))
#expect(OnboardingStateStore.lastMode(defaults: defaults) == .homeNetwork)
}
private struct TestDefaults {
var suiteName: String
var defaults: UserDefaults

View File

@ -45,8 +45,8 @@ enum ExecApprovalEvaluator {
let skillAllow: Bool
if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty {
let bins = await SkillBinsCache.shared.currentBins()
skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) }
let bins = await SkillBinsCache.shared.currentTrust()
skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins)
} else {
skillAllow = false
}
@ -65,4 +65,26 @@ enum ExecApprovalEvaluator {
allowlistMatch: allowlistSatisfied ? allowlistMatches.first : nil,
skillAllow: skillAllow)
}
static func isSkillAutoAllowed(
_ resolutions: [ExecCommandResolution],
trustedBinsByName: [String: Set<String>]) -> Bool
{
guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false }
return resolutions.allSatisfy { resolution in
guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName),
let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath)
else {
return false
}
return trustedBinsByName[executableName]?.contains(resolvedPath) == true
}
}
static func _testIsSkillAutoAllowed(
_ resolutions: [ExecCommandResolution],
trustedBinsByName: [String: Set<String>]) -> Bool
{
self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName)
}
}

View File

@ -370,6 +370,17 @@ enum ExecApprovalsStore {
static func resolve(agentId: String?) -> ExecApprovalsResolved {
let file = self.ensureFile()
return self.resolveFromFile(file, agentId: agentId)
}
/// Read-only resolve: loads file without writing (no ensureFile side effects).
/// Safe to call from background threads / off MainActor.
static func resolveReadOnly(agentId: String?) -> ExecApprovalsResolved {
let file = self.loadFile()
return self.resolveFromFile(file, agentId: agentId)
}
private static func resolveFromFile(_ file: ExecApprovalsFile, agentId: String?) -> ExecApprovalsResolved {
let defaults = file.defaults ?? ExecApprovalsDefaults()
let resolvedDefaults = ExecApprovalsResolvedDefaults(
security: defaults.security ?? self.defaultSecurity,
@ -777,6 +788,7 @@ actor SkillBinsCache {
static let shared = SkillBinsCache()
private var bins: Set<String> = []
private var trustByName: [String: Set<String>] = [:]
private var lastRefresh: Date?
private let refreshInterval: TimeInterval = 90
@ -787,27 +799,90 @@ actor SkillBinsCache {
return self.bins
}
func currentTrust(force: Bool = false) async -> [String: Set<String>] {
if force || self.isStale() {
await self.refresh()
}
return self.trustByName
}
func refresh() async {
do {
let report = try await GatewayConnection.shared.skillsStatus()
var next = Set<String>()
for skill in report.skills {
for bin in skill.requirements.bins {
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
if !trimmed.isEmpty { next.insert(trimmed) }
}
}
self.bins = next
let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths())
self.bins = trust.names
self.trustByName = trust.pathsByName
self.lastRefresh = Date()
} catch {
if self.lastRefresh == nil {
self.bins = []
self.trustByName = [:]
}
}
}
static func normalizeSkillBinName(_ value: String) -> String? {
let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased()
return trimmed.isEmpty ? nil : trimmed
}
static func normalizeResolvedPath(_ value: String?) -> String? {
let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
guard !trimmed.isEmpty else { return nil }
return URL(fileURLWithPath: trimmed).standardizedFileURL.path
}
static func buildTrustIndex(
report: SkillsStatusReport,
searchPaths: [String]) -> SkillBinTrustIndex
{
var names = Set<String>()
var pathsByName: [String: Set<String>] = [:]
for skill in report.skills {
for bin in skill.requirements.bins {
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { continue }
names.insert(trimmed)
guard let name = self.normalizeSkillBinName(trimmed),
let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths),
let normalizedPath = self.normalizeResolvedPath(resolvedPath)
else {
continue
}
var paths = pathsByName[name] ?? Set<String>()
paths.insert(normalizedPath)
pathsByName[name] = paths
}
}
return SkillBinTrustIndex(names: names, pathsByName: pathsByName)
}
private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? {
let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin
if expanded.contains("/") || expanded.contains("\\") {
return FileManager().isExecutableFile(atPath: expanded) ? expanded : nil
}
return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths)
}
private func isStale() -> Bool {
guard let lastRefresh else { return true }
return Date().timeIntervalSince(lastRefresh) > self.refreshInterval
}
static func _testBuildTrustIndex(
report: SkillsStatusReport,
searchPaths: [String]) -> SkillBinTrustIndex
{
self.buildTrustIndex(report: report, searchPaths: searchPaths)
}
}
struct SkillBinTrustIndex {
let names: Set<String>
let pathsByName: [String: Set<String>]
}

View File

@ -43,7 +43,33 @@ final class ExecApprovalsGatewayPrompter {
do {
let data = try JSONEncoder().encode(payload)
let request = try JSONDecoder().decode(GatewayApprovalRequest.self, from: data)
guard self.shouldPresent(request: request) else { return }
let presentation = self.shouldPresent(request: request)
guard presentation.shouldAsk else {
// Ask policy says no prompt needed resolve based on security policy
let decision: ExecApprovalDecision = presentation.security == .full ? .allowOnce : .deny
try await GatewayConnection.shared.requestVoid(
method: .execApprovalResolve,
params: [
"id": AnyCodable(request.id),
"decision": AnyCodable(decision.rawValue),
],
timeoutMs: 10000)
return
}
guard presentation.canPresent else {
let decision = Self.fallbackDecision(
request: request.request,
askFallback: presentation.askFallback,
allowlist: presentation.allowlist)
try await GatewayConnection.shared.requestVoid(
method: .execApprovalResolve,
params: [
"id": AnyCodable(request.id),
"decision": AnyCodable(decision.rawValue),
],
timeoutMs: 10000)
return
}
let decision = ExecApprovalsPromptPresenter.prompt(request.request)
try await GatewayConnection.shared.requestVoid(
method: .execApprovalResolve,
@ -57,16 +83,89 @@ final class ExecApprovalsGatewayPrompter {
}
}
private func shouldPresent(request: GatewayApprovalRequest) -> Bool {
/// Whether the ask policy requires prompting the user.
/// Note: this only determines if a prompt is shown, not whether the action is allowed.
/// The security policy (full/deny/allowlist) decides the actual outcome.
private static func shouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool {
switch ask {
case .always:
return true
case .onMiss:
return security == .allowlist
case .off:
return false
}
}
struct PresentationDecision {
/// Whether the ask policy requires prompting the user (not whether the action is allowed).
var shouldAsk: Bool
/// Whether the prompt can actually be shown (session match, recent activity, etc.).
var canPresent: Bool
/// The resolved security policy, used to determine allow/deny when no prompt is shown.
var security: ExecSecurity
/// Fallback security policy when a prompt is needed but can't be presented.
var askFallback: ExecSecurity
var allowlist: [ExecAllowlistEntry]
}
private func shouldPresent(request: GatewayApprovalRequest) -> PresentationDecision {
let mode = AppStateStore.shared.connectionMode
let activeSession = WebChatManager.shared.activeSessionKey?.trimmingCharacters(in: .whitespacesAndNewlines)
let requestSession = request.request.sessionKey?.trimmingCharacters(in: .whitespacesAndNewlines)
return Self.shouldPresent(
// Read-only resolve to avoid disk writes on the MainActor
let approvals = ExecApprovalsStore.resolveReadOnly(agentId: request.request.agentId)
let security = approvals.agent.security
let ask = approvals.agent.ask
let shouldAsk = Self.shouldAsk(security: security, ask: ask)
let canPresent = shouldAsk && Self.shouldPresent(
mode: mode,
activeSession: activeSession,
requestSession: requestSession,
lastInputSeconds: Self.lastInputSeconds(),
thresholdSeconds: 120)
return PresentationDecision(
shouldAsk: shouldAsk,
canPresent: canPresent,
security: security,
askFallback: approvals.agent.askFallback,
allowlist: approvals.allowlist)
}
private static func fallbackDecision(
request: ExecApprovalPromptRequest,
askFallback: ExecSecurity,
allowlist: [ExecAllowlistEntry]) -> ExecApprovalDecision
{
guard askFallback == .allowlist else {
return askFallback == .full ? .allowOnce : .deny
}
let resolution = self.fallbackResolution(for: request)
let match = ExecAllowlistMatcher.match(entries: allowlist, resolution: resolution)
return match == nil ? .deny : .allowOnce
}
private static func fallbackResolution(for request: ExecApprovalPromptRequest) -> ExecCommandResolution? {
let resolvedPath = request.resolvedPath?.trimmingCharacters(in: .whitespacesAndNewlines)
let trimmedResolvedPath = (resolvedPath?.isEmpty == false) ? resolvedPath : nil
let rawExecutable = self.firstToken(from: request.command) ?? trimmedResolvedPath ?? ""
guard !rawExecutable.isEmpty || trimmedResolvedPath != nil else { return nil }
let executableName = trimmedResolvedPath.map { URL(fileURLWithPath: $0).lastPathComponent } ?? rawExecutable
return ExecCommandResolution(
rawExecutable: rawExecutable,
resolvedPath: trimmedResolvedPath,
executableName: executableName,
cwd: request.cwd)
}
private static func firstToken(from command: String) -> String? {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return nil }
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
}
private static func shouldPresent(
@ -117,5 +216,29 @@ extension ExecApprovalsGatewayPrompter {
lastInputSeconds: lastInputSeconds,
thresholdSeconds: thresholdSeconds)
}
static func _testShouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool {
self.shouldAsk(security: security, ask: ask)
}
static func _testFallbackDecision(
command: String,
resolvedPath: String?,
askFallback: ExecSecurity,
allowlistPatterns: [String]) -> ExecApprovalDecision
{
self.fallbackDecision(
request: ExecApprovalPromptRequest(
command: command,
cwd: nil,
host: nil,
security: nil,
ask: nil,
agentId: nil,
resolvedPath: resolvedPath,
sessionKey: nil),
askFallback: askFallback,
allowlist: allowlistPatterns.map { ExecAllowlistEntry(pattern: $0) })
}
}
#endif

View File

@ -37,8 +37,7 @@ struct ExecCommandResolution {
var resolutions: [ExecCommandResolution] = []
resolutions.reserveCapacity(segments.count)
for segment in segments {
guard let token = self.parseFirstToken(segment),
let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: env)
guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env)
else {
return []
}
@ -88,6 +87,20 @@ struct ExecCommandResolution {
cwd: cwd)
}
private static func resolveShellSegmentExecutable(
_ segment: String,
cwd: String?,
env: [String: String]?) -> ExecCommandResolution?
{
let tokens = self.tokenizeShellWords(segment)
guard !tokens.isEmpty else { return nil }
let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens)
guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else {
return nil
}
return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env)
}
private static func parseFirstToken(_ command: String) -> String? {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return nil }
@ -102,6 +115,59 @@ struct ExecCommandResolution {
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
}
private static func tokenizeShellWords(_ command: String) -> [String] {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return [] }
var tokens: [String] = []
var current = ""
var inSingle = false
var inDouble = false
var escaped = false
func appendCurrent() {
guard !current.isEmpty else { return }
tokens.append(current)
current.removeAll(keepingCapacity: true)
}
for ch in trimmed {
if escaped {
current.append(ch)
escaped = false
continue
}
if ch == "\\", !inSingle {
escaped = true
continue
}
if ch == "'", !inDouble {
inSingle.toggle()
continue
}
if ch == "\"", !inSingle {
inDouble.toggle()
continue
}
if ch.isWhitespace, !inSingle, !inDouble {
appendCurrent()
continue
}
current.append(ch)
}
if escaped {
current.append("\\")
}
appendCurrent()
return tokens
}
private enum ShellTokenContext {
case unquoted
case doubleQuoted
@ -148,8 +214,14 @@ struct ExecCommandResolution {
while idx < chars.count {
let ch = chars[idx]
let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil
let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle)
if escaped {
if ch == "\n" {
escaped = false
idx += 1
continue
}
current.append(ch)
escaped = false
idx += 1
@ -157,6 +229,10 @@ struct ExecCommandResolution {
}
if ch == "\\", !inSingle {
if next == "\n" {
idx += 2
continue
}
current.append(ch)
escaped = true
idx += 1
@ -177,7 +253,7 @@ struct ExecCommandResolution {
continue
}
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) {
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) {
// Fail closed on command/process substitution in allowlist mode,
// including command substitution inside double-quoted shell strings.
return nil
@ -201,6 +277,25 @@ struct ExecCommandResolution {
return segments
}
private static func nextShellSignificantCharacter(
chars: [Character],
after idx: Int,
inSingle: Bool) -> Character?
{
guard !inSingle else {
return idx + 1 < chars.count ? chars[idx + 1] : nil
}
var cursor = idx + 1
while cursor < chars.count {
if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" {
cursor += 2
continue
}
return chars[cursor]
}
return nil
}
private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool {
let context: ShellTokenContext = inDouble ? .doubleQuoted : .unquoted
guard let rules = self.shellFailClosedRules[context] else {

View File

@ -47,7 +47,7 @@ actor PortGuardian {
let listeners = await self.listeners(on: port)
guard !listeners.isEmpty else { continue }
for listener in listeners {
if self.isExpected(listener, port: port, mode: mode) {
if Self.isExpected(listener, port: port, mode: mode) {
let message = """
port \(port) already served by expected \(listener.command)
(pid \(listener.pid)) keeping
@ -55,6 +55,14 @@ actor PortGuardian {
self.logger.info("\(message, privacy: .public)")
continue
}
if mode == .remote {
let message = """
port \(port) held by \(listener.command)
(pid \(listener.pid)) in remote mode not killing
"""
self.logger.warning(message)
continue
}
let killed = await self.kill(listener.pid)
if killed {
let message = """
@ -271,8 +279,8 @@ actor PortGuardian {
switch mode {
case .remote:
expectedDesc = "SSH tunnel to remote gateway"
okPredicate = { $0.command.lowercased().contains("ssh") }
expectedDesc = "Remote gateway (SSH tunnel, Docker, or direct)"
okPredicate = { _ in true }
case .local:
expectedDesc = "Gateway websocket (node/tsx)"
okPredicate = { listener in
@ -352,13 +360,12 @@ actor PortGuardian {
return sigkill.ok
}
private func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool {
private static func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool {
let cmd = listener.command.lowercased()
let full = listener.fullCommand.lowercased()
switch mode {
case .remote:
// Remote mode expects an SSH tunnel for the gateway WebSocket port.
if port == GatewayEnvironment.gatewayPort() { return cmd.contains("ssh") }
if port == GatewayEnvironment.gatewayPort() { return true }
return false
case .local:
// The gateway daemon may listen as `openclaw` or as its runtime (`node`, `bun`, etc).
@ -406,6 +413,16 @@ extension PortGuardian {
self.parseListeners(from: text).map { ($0.pid, $0.command, $0.fullCommand, $0.user) }
}
static func _testIsExpected(
command: String,
fullCommand: String,
port: Int,
mode: AppState.ConnectionMode) -> Bool
{
let listener = Listener(pid: 0, command: command, fullCommand: fullCommand, user: nil)
return Self.isExpected(listener, port: port, mode: mode)
}
static func _testBuildReport(
port: Int,
mode: AppState.ConnectionMode,

View File

@ -141,6 +141,26 @@ struct ExecAllowlistTests {
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on line-continued command substitution`() {
let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on chained line-continued command substitution`() {
let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on quoted backticks`() {
let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""]
let resolutions = ExecCommandResolution.resolveForAllowlist(
@ -208,6 +228,30 @@ struct ExecAllowlistTests {
#expect(resolutions[1].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() {
let command = ["/bin/sh", "-lc", "env /usr/bin/touch /tmp/openclaw-allowlist-test"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "env /usr/bin/touch /tmp/openclaw-allowlist-test",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.count == 1)
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
#expect(resolutions[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env assignments inside shell segments`() {
let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.count == 1)
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
#expect(resolutions[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env to effective direct executable`() {
let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"]
let resolutions = ExecCommandResolution.resolveForAllowlist(

View File

@ -52,4 +52,51 @@ struct ExecApprovalsGatewayPrompterTests {
lastInputSeconds: 400)
#expect(!remote)
}
// MARK: - shouldAsk
@Test func askAlwaysPromptsRegardlessOfSecurity() {
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .always))
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .always))
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .always))
}
@Test func askOnMissPromptsOnlyForAllowlist() {
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .onMiss))
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .onMiss))
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .onMiss))
}
@Test func askOffNeverPrompts() {
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .off))
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .off))
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .off))
}
@Test func fallbackAllowlistAllowsMatchingResolvedPath() {
let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
command: "git status",
resolvedPath: "/usr/bin/git",
askFallback: .allowlist,
allowlistPatterns: ["/usr/bin/git"])
#expect(decision == .allowOnce)
}
@Test func fallbackAllowlistDeniesAllowlistMiss() {
let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
command: "git status",
resolvedPath: "/usr/bin/git",
askFallback: .allowlist,
allowlistPatterns: ["/usr/bin/rg"])
#expect(decision == .deny)
}
@Test func fallbackFullAllowsWhenPromptCannotBeShown() {
let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
command: "git status",
resolvedPath: "/usr/bin/git",
askFallback: .full,
allowlistPatterns: [])
#expect(decision == .allowOnce)
}
}

View File

@ -0,0 +1,90 @@
import Foundation
import Testing
@testable import OpenClaw
struct ExecSkillBinTrustTests {
@Test func `build trust index resolves skill bin paths`() throws {
let fixture = try Self.makeExecutable(named: "jq")
defer { try? FileManager.default.removeItem(at: fixture.root) }
let trust = SkillBinsCache._testBuildTrustIndex(
report: Self.makeReport(bins: ["jq"]),
searchPaths: [fixture.root.path])
#expect(trust.names == ["jq"])
#expect(trust.pathsByName["jq"] == [fixture.path])
}
@Test func `skill auto allow accepts trusted resolved skill bin path`() throws {
let fixture = try Self.makeExecutable(named: "jq")
defer { try? FileManager.default.removeItem(at: fixture.root) }
let trust = SkillBinsCache._testBuildTrustIndex(
report: Self.makeReport(bins: ["jq"]),
searchPaths: [fixture.root.path])
let resolution = ExecCommandResolution(
rawExecutable: "jq",
resolvedPath: fixture.path,
executableName: "jq",
cwd: nil)
#expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
}
@Test func `skill auto allow rejects same basename at different path`() throws {
let trusted = try Self.makeExecutable(named: "jq")
let untrusted = try Self.makeExecutable(named: "jq")
defer {
try? FileManager.default.removeItem(at: trusted.root)
try? FileManager.default.removeItem(at: untrusted.root)
}
let trust = SkillBinsCache._testBuildTrustIndex(
report: Self.makeReport(bins: ["jq"]),
searchPaths: [trusted.root.path])
let resolution = ExecCommandResolution(
rawExecutable: "jq",
resolvedPath: untrusted.path,
executableName: "jq",
cwd: nil)
#expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
}
private static func makeExecutable(named name: String) throws -> (root: URL, path: String) {
let root = FileManager.default.temporaryDirectory
.appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true)
try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true)
let file = root.appendingPathComponent(name)
try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8)
try FileManager.default.setAttributes(
[.posixPermissions: NSNumber(value: Int16(0o755))],
ofItemAtPath: file.path)
return (root, file.path)
}
private static func makeReport(bins: [String]) -> SkillsStatusReport {
SkillsStatusReport(
workspaceDir: "/tmp/workspace",
managedSkillsDir: "/tmp/skills",
skills: [
SkillStatus(
name: "test-skill",
description: "test",
source: "local",
filePath: "/tmp/skills/test-skill/SKILL.md",
baseDir: "/tmp/skills/test-skill",
skillKey: "test-skill",
primaryEnv: nil,
emoji: nil,
homepage: nil,
always: false,
disabled: false,
eligible: true,
requirements: SkillRequirements(bins: bins, env: [], config: []),
missing: SkillMissing(bins: [], env: [], config: []),
configChecks: [],
install: [])
])
}
}

View File

@ -139,6 +139,54 @@ struct LowCoverageHelperTests {
#expect(emptyReport.summary.contains("Nothing is listening"))
}
@Test func `port guardian remote mode does not kill docker`() {
#expect(PortGuardian._testIsExpected(
command: "com.docker.backend",
fullCommand: "com.docker.backend",
port: 18789, mode: .remote) == true)
#expect(PortGuardian._testIsExpected(
command: "ssh",
fullCommand: "ssh -L 18789:localhost:18789 user@host",
port: 18789, mode: .remote) == true)
#expect(PortGuardian._testIsExpected(
command: "podman",
fullCommand: "podman",
port: 18789, mode: .remote) == true)
}
@Test func `port guardian local mode still rejects unexpected`() {
#expect(PortGuardian._testIsExpected(
command: "com.docker.backend",
fullCommand: "com.docker.backend",
port: 18789, mode: .local) == false)
#expect(PortGuardian._testIsExpected(
command: "python",
fullCommand: "python server.py",
port: 18789, mode: .local) == false)
#expect(PortGuardian._testIsExpected(
command: "node",
fullCommand: "node /path/to/gateway-daemon",
port: 18789, mode: .local) == true)
}
@Test func `port guardian remote mode report accepts any listener`() {
let dockerReport = PortGuardian._testBuildReport(
port: 18789, mode: .remote,
listeners: [(pid: 99, command: "com.docker.backend",
fullCommand: "com.docker.backend", user: "me")])
#expect(dockerReport.offenders.isEmpty)
let localDockerReport = PortGuardian._testBuildReport(
port: 18789, mode: .local,
listeners: [(pid: 99, command: "com.docker.backend",
fullCommand: "com.docker.backend", user: "me")])
#expect(!localDockerReport.offenders.isEmpty)
}
@Test @MainActor func `canvas scheme handler resolves files and errors`() throws {
let root = FileManager().temporaryDirectory
.appendingPathComponent("canvas-\(UUID().uuidString)", isDirectory: true)

View File

@ -74,4 +74,22 @@ struct VoiceWakeRuntimeTests {
let config = WakeWordGateConfig(triggers: ["openclaw"], minPostTriggerGap: 0.3)
#expect(WakeWordGate.match(transcript: transcript, segments: segments, config: config)?.command == "do thing")
}
@Test func `gate command text handles foreign string ranges`() {
let transcript = "hey openclaw do thing"
let other = "do thing"
let foreignRange = other.range(of: "do")
let segments = [
WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")),
WakeWordSegment(text: "openclaw", start: 0.2, duration: 0.1, range: transcript.range(of: "openclaw")),
WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange),
WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil),
]
#expect(
WakeWordGate.commandText(
transcript: transcript,
segments: segments,
triggerEndTime: 0.3) == "do thing")
}
}

View File

@ -73,7 +73,7 @@ await web_search({
## Notes
- OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits.
- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
- Each Brave plan includes **\$5/month in free credit** (renewing). The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
- The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service).
- Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`).

View File

@ -195,6 +195,8 @@ Groups:
- `channels.signal.groupPolicy = open | allowlist | disabled`.
- `channels.signal.groupAllowFrom` controls who can trigger in groups when `allowlist` is set.
- `channels.signal.groups["<group-id>" | "*"]` can override group behavior with `requireMention`, `tools`, and `toolsBySender`.
- Use `channels.signal.accounts.<id>.groups` for per-account overrides in multi-account setups.
- Runtime note: if `channels.signal` is completely missing, runtime falls back to `groupPolicy="allowlist"` for group checks (even if `channels.defaults.groupPolicy` is set).
## How it works (behavior)
@ -312,6 +314,8 @@ Provider options:
- `channels.signal.allowFrom`: DM allowlist (E.164 or `uuid:<id>`). `open` requires `"*"`. Signal has no usernames; use phone/UUID ids.
- `channels.signal.groupPolicy`: `open | allowlist | disabled` (default: allowlist).
- `channels.signal.groupAllowFrom`: group sender allowlist.
- `channels.signal.groups`: per-group overrides keyed by Signal group id (or `"*"`). Supported fields: `requireMention`, `tools`, `toolsBySender`.
- `channels.signal.accounts.<id>.groups`: per-account version of `channels.signal.groups` for multi-account setups.
- `channels.signal.historyLimit`: max group messages to include as context (0 disables).
- `channels.signal.dmHistoryLimit`: DM history limit in user turns. Per-user overrides: `channels.signal.dms["<phone_or_uuid>"].historyLimit`.
- `channels.signal.textChunkLimit`: outbound chunk size (chars).

View File

@ -218,6 +218,55 @@ For actions/directory reads, user token can be preferred when configured. For wr
- if encoded option values exceed Slack limits, the flow falls back to buttons
- For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value.
## Interactive replies
Slack can render agent-authored interactive reply controls, but this feature is disabled by default.
Enable it globally:
```json5
{
channels: {
slack: {
capabilities: {
interactiveReplies: true,
},
},
},
}
```
Or enable it for one Slack account only:
```json5
{
channels: {
slack: {
accounts: {
ops: {
capabilities: {
interactiveReplies: true,
},
},
},
},
},
}
```
When enabled, agents can emit Slack-only reply directives:
- `[[slack_buttons: Approve:approve, Reject:reject]]`
- `[[slack_select: Choose a target | Canary:canary, Production:production]]`
These directives compile into Slack Block Kit and route clicks or selections back through the existing Slack interaction event path.
Notes:
- This is Slack-specific UI. Other channels do not translate Slack Block Kit directives into their own button systems.
- The interactive callback values are OpenClaw-generated opaque tokens, not raw agent-authored values.
- If generated interactive blocks would exceed Slack Block Kit limits, OpenClaw falls back to the original text reply instead of sending an invalid blocks payload.
Default slash command settings:
- `enabled: false`

View File

@ -9,32 +9,32 @@ read_when:
# CI Pipeline
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed.
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed.
## Job Overview
| Job | Purpose | When it runs |
| ----------------- | ------------------------------------------------------- | ------------------------------------------------- |
| `docs-scope` | Detect docs-only changes | Always |
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs |
| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes |
| `check-docs` | Markdown lint + broken link check | Docs changed |
| `code-analysis` | LOC threshold check (1000 lines) | PRs only |
| `secrets` | Detect leaked secrets | Always |
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes |
| `release-check` | Validate npm pack contents | After build |
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes |
| Job | Purpose | When it runs |
| ----------------- | ------------------------------------------------------- | ---------------------------------- |
| `docs-scope` | Detect docs-only changes | Always |
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes |
| `check` | TypeScript types, lint, format | Non-docs, node changes |
| `check-docs` | Markdown lint + broken link check | Docs changed |
| `secrets` | Detect leaked secrets | Always |
| `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes |
| `release-check` | Validate npm pack contents | Pushes to `main` after build |
| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes |
| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes |
## Fail-Fast Order
Jobs are ordered so cheap checks fail before expensive ones run:
1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min)
2. `build-artifacts` (blocked on above)
3. `checks`, `checks-windows`, `macos`, `android` (blocked on build)
1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first)
2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22`
Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`.

View File

@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first.
Under the hood, requests are executed as a normal Gateway agent run (same codepath as
`openclaw agent`), so routing/permissions/config match your Gateway.
## Authentication
## Authentication, security, and routing
Uses the Gateway auth configuration. Send a bearer token:
Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api):
- `Authorization: Bearer <token>`
- use `Authorization: Bearer <token>` with the normal Gateway auth config
- treat the endpoint as full operator access for the gateway instance
- select agents with `model: "openclaw:<agentId>"`, `model: "agent:<agentId>"`, or `x-openclaw-agent-id`
- use `x-openclaw-session-key` for explicit session routing
Notes:
- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`).
- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`).
- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`.
## Security boundary (important)
Treat this endpoint as a **full operator-access** surface for the gateway instance.
- HTTP bearer auth here is not a narrow per-user scope model.
- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential.
- Requests run through the same control-plane agent path as trusted operator actions.
- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway.
- If the target agent policy allows sensitive tools, this endpoint can use them.
- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet.
See [Security](/gateway/security) and [Remote access](/gateway/remote).
## Choosing an agent
No custom headers required: encode the agent id in the OpenResponses `model` field:
- `model: "openclaw:<agentId>"` (example: `"openclaw:main"`, `"openclaw:beta"`)
- `model: "agent:<agentId>"` (alias)
Or target a specific OpenClaw agent by header:
- `x-openclaw-agent-id: <agentId>` (default: `main`)
Advanced:
- `x-openclaw-session-key: <sessionKey>` to fully control session routing.
## Enabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `true`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: true },
},
},
},
}
```
## Disabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `false`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: false },
},
},
},
}
```
Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`.
## Session behavior

View File

@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost):
- No real keys required
- Should be fast and stable
- Pool note:
- OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards.
- On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`).
- OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards.
- On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there.
- Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`).
### E2E (gateway smoke)

View File

@ -0,0 +1,138 @@
---
summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts"
read_when:
- You are deploying OpenClaw on a cloud VM with Docker
- You need the shared binary bake, persistence, and update flow
title: "Docker VM Runtime"
---
# Docker VM Runtime
Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers.
## Bake required binaries into the image
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
## Build and launch
```bash
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory.
Use a larger machine class before retrying.
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
Verify Gateway:
```bash
docker compose logs -f openclaw-gateway
```
Expected output:
```
[gateway] listening on ws://0.0.0.0:18789
```
## What persists where
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
## Updates
To update OpenClaw on the VM:
```bash
git pull
docker compose build
docker compose up -d
```

View File

@ -281,77 +281,20 @@ services:
---
## 10) Bake required binaries into the image (critical)
## 10) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
Use the shared runtime guide for the common Docker host flow:
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
- [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
---
## 11) Build and launch
## 11) GCP-specific launch notes
```bash
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing:
@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins
If you changed the gateway port, replace `18789` with your configured port.
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 12) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
---
## 13) Access from your laptop
## 12) Access from your laptop
Create an SSH tunnel to forward the Gateway port:
@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list
docker compose run --rm openclaw-cli devices approve <requestId>
```
---
## What persists where (source of truth)
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
## Updates
To update OpenClaw on the VM:
```bash
cd ~/openclaw
git pull
docker compose build
docker compose up -d
```
Need the shared persistence and update reference again?
See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates).
---

View File

@ -202,107 +202,20 @@ services:
---
## 7) Bake required binaries into the image (critical)
## 7) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
Use the shared runtime guide for the common Docker host flow:
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
- [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
---
## 8) Build and launch
## 8) Hetzner-specific access
```bash
docker compose build
docker compose up -d openclaw-gateway
```
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 9) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
From your laptop:
After the shared build and launch steps, tunnel from your laptop:
```bash
ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP
@ -316,25 +229,7 @@ Paste your gateway token.
---
## What persists where (source of truth)
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where).
## Infrastructure as Code (Terraform)

View File

@ -9,6 +9,8 @@ title: "Android App"
# Android App (Node)
> **Note:** The Android app has not been publicly released yet. The source code is available in the [OpenClaw repository](https://github.com/openclaw/openclaw) under `apps/android`. You can build it yourself using Java 17 and the Android SDK (`./gradlew :app:assembleDebug`). See [apps/android/README.md](https://github.com/openclaw/openclaw/blob/main/apps/android/README.md) for build instructions.
## Support snapshot
- Role: companion node app (Android does not host the Gateway).

View File

@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set:
}
```
`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin
normalizes the provider-supplied `From` value and compares it to `allowFrom`.
Webhook verification authenticates provider delivery and payload integrity, but
it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as
caller-ID filtering, not strong caller identity.
Auto-responses use the agent system. Tune with:
- `responseModel`

View File

@ -85,8 +85,8 @@ See [Memory](/concepts/memory).
- **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey`
- **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey`
**Brave Search free credit:** Each Brave plan includes $5/month in renewing
free credit. The Search plan costs $5 per 1,000 requests, so the credit covers
**Brave Search free credit:** Each Brave plan includes \$5/month in renewing
free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers
1,000 requests/month at no charge. Set your usage limit in the Brave dashboard
to avoid unexpected charges.

View File

@ -11,7 +11,7 @@ title: "Tests"
- `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests don't collide with a running instance. Use this when a prior gateway run left port 18789 occupied.
- `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic.
- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
- `pnpm test`: runs the fast core unit lane by default for quick local feedback.
- `pnpm test:channels`: runs channel-heavy suites.
- `pnpm test:extensions`: runs extension/plugin suites.

View File

@ -167,93 +167,8 @@ openclaw onboard --non-interactive \
`--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts.
</Note>
<AccordionGroup>
<Accordion title="Gemini example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice gemini-api-key \
--gemini-api-key "$GEMINI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Z.AI example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice zai-api-key \
--zai-api-key "$ZAI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Vercel AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ai-gateway-api-key \
--ai-gateway-api-key "$AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Cloudflare AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice cloudflare-ai-gateway-api-key \
--cloudflare-ai-gateway-account-id "your-account-id" \
--cloudflare-ai-gateway-gateway-id "your-gateway-id" \
--cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Moonshot example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice moonshot-api-key \
--moonshot-api-key "$MOONSHOT_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Synthetic example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice synthetic-api-key \
--synthetic-api-key "$SYNTHETIC_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="OpenCode example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice opencode-zen \
--opencode-zen-api-key "$OPENCODE_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog.
</Accordion>
<Accordion title="Ollama example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ollama \
--custom-model-id "qwen3.5:27b" \
--accept-risk \
--gateway-port 18789 \
--gateway-bind loopback
```
Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance.
</Accordion>
</AccordionGroup>
Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples).
Use this reference page for flag semantics and step ordering.
### Add agent (non-interactive)

View File

@ -48,6 +48,8 @@ Gateway.
- `openclaw`: managed, isolated browser (no extension required).
- `chrome`: extension relay to your **system browser** (requires the OpenClaw
extension to be attached to a tab).
- `existing-session`: official Chrome MCP attach flow for a running Chrome
profile.
Set `browser.defaultProfile: "openclaw"` if you want managed mode by default.
@ -77,6 +79,12 @@ Browser settings live in `~/.openclaw/openclaw.json`.
profiles: {
openclaw: { cdpPort: 18800, color: "#FF4500" },
work: { cdpPort: 18801, color: "#0066CC" },
chromeLive: {
cdpPort: 18802,
driver: "existing-session",
attachOnly: true,
color: "#00AA00",
},
remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" },
},
},
@ -100,6 +108,8 @@ Notes:
- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay.
- Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary.
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP.
- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do
not set `cdpUrl` for that driver.
## Use Brave (or another Chromium-based browser)
@ -264,11 +274,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be:
- **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port
- **remote**: an explicit CDP URL (Chromium-based browser running elsewhere)
- **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension
- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect
Defaults:
- The `openclaw` profile is auto-created if missing.
- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default).
- Existing-session profiles are opt-in; create them with `--driver existing-session`.
- Local CDP ports allocate from **1880018899** by default.
- Deleting a profile moves its local data directory to Trash.
@ -328,6 +340,70 @@ Notes:
- This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions).
- Detach by clicking the extension icon again.
## Chrome existing-session via MCP
OpenClaw can also attach to a running Chrome profile through the official
Chrome DevTools MCP server. This reuses the tabs and login state already open in
that Chrome profile.
Official background and setup references:
- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp)
Create a profile:
```bash
openclaw browser create-profile \
--name chrome-live \
--driver existing-session \
--color "#00AA00"
```
Then in Chrome:
1. Open `chrome://inspect/#remote-debugging`
2. Enable remote debugging
3. Keep Chrome running and approve the connection prompt when OpenClaw attaches
Live attach smoke test:
```bash
openclaw browser --browser-profile chrome-live start
openclaw browser --browser-profile chrome-live status
openclaw browser --browser-profile chrome-live tabs
openclaw browser --browser-profile chrome-live snapshot --format ai
```
What success looks like:
- `status` shows `driver: existing-session`
- `status` shows `running: true`
- `tabs` lists your already-open Chrome tabs
- `snapshot` returns refs from the selected live tab
What to check if attach does not work:
- Chrome is version `144+`
- remote debugging is enabled at `chrome://inspect/#remote-debugging`
- Chrome displayed the attach consent prompt and you accepted it
- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect`
Notes:
- This path is higher-risk than the isolated `openclaw` profile because it can
act inside your signed-in browser session.
- OpenClaw does not launch Chrome for this driver; it attaches to an existing
session only.
- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not
the legacy default-profile remote debugging port workflow.
- Existing-session screenshots support page captures and `--ref` element
captures from snapshots, but not CSS `--element` selectors.
- Existing-session `wait --url` supports exact, substring, and glob patterns
like other browser drivers. `wait --load networkidle` is not supported yet.
- Some features still require the extension relay or managed browser path, such
as PDF export and download interception.
- Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated.
WSL2 / cross-namespace example:

View File

@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs
Attach/detach happens via a **single Chrome toolbar button**.
If you want Chrome's official DevTools MCP attach flow instead of the OpenClaw
extension relay, use an `existing-session` browser profile. See
[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome's own
setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your
browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp).
## What it is (concept)
There are three parts:

View File

@ -271,6 +271,8 @@ Approval-backed interpreter/runtime runs are intentionally conservative:
- Exact argv/cwd/env context is always bound.
- Direct shell script and direct runtime file forms are best-effort bound to one concrete local
file snapshot.
- Common package-manager wrapper forms that still resolve to one direct local file (for example
`pnpm exec`, `pnpm node`, `npm exec`, `npx`) are unwrapped before binding.
- If OpenClaw cannot identify exactly one concrete local file for an interpreter/runtime command
(for example package scripts, eval forms, runtime-specific loader chains, or ambiguous multi-file
forms), approval-backed execution is denied instead of claiming semantic coverage it does not

View File

@ -85,6 +85,13 @@ Implications:
Use allowlists and explicit install/load paths for non-bundled plugins. Treat
workspace plugins as development-time code, not production defaults.
Important trust note:
- `plugins.allow` trusts **plugin ids**, not source provenance.
- A workspace plugin with the same id as a bundled plugin intentionally shadows
the bundled copy when that workspace plugin is enabled/allowlisted.
- This is normal and useful for local development, patch testing, and hotfixes.
## Available plugins (official)
- Microsoft Teams is plugin-only as of 2026.1.15; install `@openclaw/msteams` if you use Teams.
@ -363,6 +370,14 @@ manifest.
If multiple plugins resolve to the same id, the first match in the order above
wins and lower-precedence copies are ignored.
That means:
- workspace plugins intentionally shadow bundled plugins with the same id
- `plugins.allow: ["foo"]` authorizes the active `foo` plugin by id, even when
the active copy comes from the workspace instead of the bundled extension root
- if you need stricter provenance control, use explicit install/load paths and
inspect the resolved plugin source before enabling it
### Enablement rules
Enablement is resolved after discovery:
@ -372,6 +387,7 @@ Enablement is resolved after discovery:
- `plugins.entries.<id>.enabled: false` disables that plugin
- workspace-origin plugins are disabled by default
- allowlists restrict the active set when `plugins.allow` is non-empty
- allowlists are **id-based**, not source-based
- bundled plugins are disabled by default unless:
- the bundled id is in the built-in default-on set, or
- you explicitly enable it, or
@ -1322,6 +1338,8 @@ Plugins run in-process with the Gateway. Treat them as trusted code:
- Only install plugins you trust.
- Prefer `plugins.allow` allowlists.
- Remember that `plugins.allow` is id-based, so an enabled workspace plugin can
intentionally shadow a bundled plugin with the same id.
- Restart the Gateway after changes.
## Testing plugins

View File

@ -65,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi
2. In the dashboard, choose the **Search** plan and generate an API key.
3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment.
Each Brave plan includes **$5/month in free credit** (renewing). The Search
plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
Each Brave plan includes **\$5/month in free credit** (renewing). The Search
plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
your usage limit in the Brave dashboard to avoid unexpected charges. See the
[Brave API portal](https://brave.com/search/api/) for current plans and
pricing.

View File

@ -54,6 +54,49 @@ describe("acpx ensure", () => {
}
});
function mockEnsureInstallFlow() {
  // Queue the three spawn results the ensure flow observes during an install:
  // a stale version probe, the npm install output, then the pinned version probe.
  const okResult = (stdout: string) => ({
    stdout,
    stderr: "",
    code: 0,
    error: null,
  });
  const outputs = [
    "acpx 0.0.9\n",
    "added 1 package\n",
    `acpx ${ACPX_PINNED_VERSION}\n`,
  ];
  for (const stdout of outputs) {
    spawnAndCollectMock.mockResolvedValueOnce(okResult(stdout));
  }
}
function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) {
  // The two version probes that bracket the npm install share one shape.
  const versionProbe = {
    command: "/plugin/node_modules/.bin/acpx",
    args: ["--version"],
    cwd: "/plugin",
    stripProviderAuthEnvVars,
  };
  const installCall = {
    command: "npm",
    args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
    cwd: "/plugin",
    stripProviderAuthEnvVars,
  };
  // Calls must arrive in order: probe, install, re-probe.
  [versionProbe, installCall, versionProbe].forEach((shape, index) => {
    expect(spawnAndCollectMock.mock.calls[index]?.[0]).toMatchObject(shape);
  });
}
it("accepts the pinned acpx version", async () => {
spawnAndCollectMock.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
@ -177,25 +220,7 @@ describe("acpx ensure", () => {
});
it("installs and verifies pinned acpx when precheck fails", async () => {
spawnAndCollectMock
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
mockEnsureInstallFlow();
await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx",
@ -204,33 +229,11 @@ describe("acpx ensure", () => {
});
expect(spawnAndCollectMock).toHaveBeenCalledTimes(3);
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
});
expectEnsureInstallCalls();
});
it("threads stripProviderAuthEnvVars through version probes and install", async () => {
spawnAndCollectMock
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
mockEnsureInstallFlow();
await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx",
@ -239,24 +242,7 @@ describe("acpx ensure", () => {
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expectEnsureInstallCalls(true);
});
it("fails with actionable error when npm install fails", async () => {

View File

@ -162,6 +162,39 @@ function resolveTextChunk(params: {
};
}
function createTextDeltaEvent(params: {
  content: string | null | undefined;
  stream: "output" | "thought";
  tag?: AcpSessionUpdateTag;
}): AcpRuntimeEvent | null {
  const { content, stream, tag } = params;
  // Absent or empty content produces no event rather than an empty delta.
  if (content == null || content.length === 0) {
    return null;
  }
  // Only attach `tag` when one was supplied, so untagged events stay tag-free.
  if (tag) {
    return { type: "text_delta", text: content, stream, tag };
  }
  return { type: "text_delta", text: content, stream };
}
function createToolCallEvent(params: {
  payload: Record<string, unknown>;
  tag: AcpSessionUpdateTag;
}): AcpRuntimeEvent {
  // Fall back to a generic label when the payload carries no usable title.
  const title = asTrimmedString(params.payload.title) || "tool call";
  const status = asTrimmedString(params.payload.status);
  const toolCallId = asOptionalString(params.payload.toolCallId);
  // Collect the optional fields first so they only appear when present,
  // preserving the original property order (toolCallId before status).
  const optional: Record<string, string> = {};
  if (toolCallId) {
    optional.toolCallId = toolCallId;
  }
  if (status) {
    optional.status = status;
  }
  return {
    type: "tool_call",
    // Render "<title> (<status>)" when a status accompanies the call.
    text: status ? `${title} (${status})` : title,
    tag: params.tag,
    ...optional,
    title,
  };
}
export function parsePromptEventLine(line: string): AcpRuntimeEvent | null {
const trimmed = line.trim();
if (!trimmed) {
@ -187,57 +220,28 @@ export function parsePromptEventLine(line: string): AcpRuntimeEvent | null {
const tag = structured.tag;
switch (type) {
case "text": {
const content = asString(payload.content);
if (content == null || content.length === 0) {
return null;
}
return {
type: "text_delta",
text: content,
case "text":
return createTextDeltaEvent({
content: asString(payload.content),
stream: "output",
...(tag ? { tag } : {}),
};
}
case "thought": {
const content = asString(payload.content);
if (content == null || content.length === 0) {
return null;
}
return {
type: "text_delta",
text: content,
tag,
});
case "thought":
return createTextDeltaEvent({
content: asString(payload.content),
stream: "thought",
...(tag ? { tag } : {}),
};
}
case "tool_call": {
const title = asTrimmedString(payload.title) || "tool call";
const status = asTrimmedString(payload.status);
const toolCallId = asOptionalString(payload.toolCallId);
return {
type: "tool_call",
text: status ? `${title} (${status})` : title,
tag,
});
case "tool_call":
return createToolCallEvent({
payload,
tag: (tag ?? "tool_call") as AcpSessionUpdateTag,
...(toolCallId ? { toolCallId } : {}),
...(status ? { status } : {}),
title,
};
}
case "tool_call_update": {
const title = asTrimmedString(payload.title) || "tool call";
const status = asTrimmedString(payload.status);
const toolCallId = asOptionalString(payload.toolCallId);
const text = status ? `${title} (${status})` : title;
return {
type: "tool_call",
text,
});
case "tool_call_update":
return createToolCallEvent({
payload,
tag: (tag ?? "tool_call_update") as AcpSessionUpdateTag,
...(toolCallId ? { toolCallId } : {}),
...(status ? { status } : {}),
title,
};
}
});
case "agent_message_chunk":
return resolveTextChunk({
payload,

View File

@ -254,6 +254,44 @@ describe("waitForExit", () => {
});
describe("spawnAndCollect", () => {
// Shape of the JSON printed by the node child spawned in
// collectSpawnedEnvSnapshot: each field mirrors one env var the probe script
// reads (provider auth keys plus OPENCLAW_API_KEY / OPENCLAW_SHELL). Fields
// are optional because stripped vars come back as undefined.
type SpawnedEnvSnapshot = {
  openai?: string;
  github?: string;
  hf?: string;
  openclaw?: string;
  shell?: string;
};
function stubProviderAuthEnv(env: Record<string, string>) {
  // Register each key/value through vitest so the originals are restored
  // automatically after the test.
  Object.entries(env).forEach(([name, value]) => {
    vi.stubEnv(name, value);
  });
}
async function collectSpawnedEnvSnapshot(options?: {
  stripProviderAuthEnvVars?: boolean;
  openAiEnvKey?: string;
  githubEnvKey?: string;
  hfEnvKey?: string;
}): Promise<SpawnedEnvSnapshot> {
  // Spawn a node child that echoes selected env vars as JSON, so the test can
  // observe exactly which variables survived into the child environment.
  const openAi = options?.openAiEnvKey ?? "OPENAI_API_KEY";
  const github = options?.githubEnvKey ?? "GITHUB_TOKEN";
  const hf = options?.hfEnvKey ?? "HF_TOKEN";
  const probeScript = `process.stdout.write(JSON.stringify({openai:process.env.${openAi},github:process.env.${github},hf:process.env.${hf},openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))`;
  const result = await spawnAndCollect({
    command: process.execPath,
    args: ["-e", probeScript],
    cwd: process.cwd(),
    stripProviderAuthEnvVars: options?.stripProviderAuthEnvVars,
  });
  // The child must exit cleanly before its stdout is trusted.
  expect(result.code).toBe(0);
  expect(result.error).toBeNull();
  return JSON.parse(result.stdout) as SpawnedEnvSnapshot;
}
it("returns abort error immediately when signal is already aborted", async () => {
const controller = new AbortController();
controller.abort();
@ -292,31 +330,15 @@ describe("spawnAndCollect", () => {
});
it("strips shared provider auth env vars from spawned acpx children", async () => {
vi.stubEnv("OPENAI_API_KEY", "openai-secret");
vi.stubEnv("GITHUB_TOKEN", "gh-secret");
vi.stubEnv("HF_TOKEN", "hf-secret");
vi.stubEnv("OPENCLAW_API_KEY", "keep-me");
const result = await spawnAndCollect({
command: process.execPath,
args: [
"-e",
"process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
],
cwd: process.cwd(),
stubProviderAuthEnv({
OPENAI_API_KEY: "openai-secret",
GITHUB_TOKEN: "gh-secret",
HF_TOKEN: "hf-secret",
OPENCLAW_API_KEY: "keep-me",
});
const parsed = await collectSpawnedEnvSnapshot({
stripProviderAuthEnvVars: true,
});
expect(result.code).toBe(0);
expect(result.error).toBeNull();
const parsed = JSON.parse(result.stdout) as {
openai?: string;
github?: string;
hf?: string;
openclaw?: string;
shell?: string;
};
expect(parsed.openai).toBeUndefined();
expect(parsed.github).toBeUndefined();
expect(parsed.hf).toBeUndefined();
@ -325,29 +347,16 @@ describe("spawnAndCollect", () => {
});
it("strips provider auth env vars case-insensitively", async () => {
vi.stubEnv("OpenAI_Api_Key", "openai-secret");
vi.stubEnv("Github_Token", "gh-secret");
vi.stubEnv("OPENCLAW_API_KEY", "keep-me");
const result = await spawnAndCollect({
command: process.execPath,
args: [
"-e",
"process.stdout.write(JSON.stringify({openai:process.env.OpenAI_Api_Key,github:process.env.Github_Token,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
],
cwd: process.cwd(),
stripProviderAuthEnvVars: true,
stubProviderAuthEnv({
OpenAI_Api_Key: "openai-secret",
Github_Token: "gh-secret",
OPENCLAW_API_KEY: "keep-me",
});
const parsed = await collectSpawnedEnvSnapshot({
stripProviderAuthEnvVars: true,
openAiEnvKey: "OpenAI_Api_Key",
githubEnvKey: "Github_Token",
});
expect(result.code).toBe(0);
expect(result.error).toBeNull();
const parsed = JSON.parse(result.stdout) as {
openai?: string;
github?: string;
openclaw?: string;
shell?: string;
};
expect(parsed.openai).toBeUndefined();
expect(parsed.github).toBeUndefined();
expect(parsed.openclaw).toBe("keep-me");
@ -355,30 +364,13 @@ describe("spawnAndCollect", () => {
});
it("preserves provider auth env vars for explicit custom commands by default", async () => {
vi.stubEnv("OPENAI_API_KEY", "openai-secret");
vi.stubEnv("GITHUB_TOKEN", "gh-secret");
vi.stubEnv("HF_TOKEN", "hf-secret");
vi.stubEnv("OPENCLAW_API_KEY", "keep-me");
const result = await spawnAndCollect({
command: process.execPath,
args: [
"-e",
"process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
],
cwd: process.cwd(),
stubProviderAuthEnv({
OPENAI_API_KEY: "openai-secret",
GITHUB_TOKEN: "gh-secret",
HF_TOKEN: "hf-secret",
OPENCLAW_API_KEY: "keep-me",
});
expect(result.code).toBe(0);
expect(result.error).toBeNull();
const parsed = JSON.parse(result.stdout) as {
openai?: string;
github?: string;
hf?: string;
openclaw?: string;
shell?: string;
};
const parsed = await collectSpawnedEnvSnapshot();
expect(parsed.openai).toBe("openai-secret");
expect(parsed.github).toBe("gh-secret");
expect(parsed.hf).toBe("hf-secret");

View File

@ -13,7 +13,7 @@ import type {
} from "openclaw/plugin-sdk/acpx";
import { AcpRuntimeError } from "openclaw/plugin-sdk/acpx";
import { toAcpMcpServers, type ResolvedAcpxPluginConfig } from "./config.js";
import { checkAcpxVersion } from "./ensure.js";
import { checkAcpxVersion, type AcpxVersionCheckResult } from "./ensure.js";
import {
parseJsonLines,
parsePromptEventLine,
@ -51,6 +51,28 @@ const ACPX_CAPABILITIES: AcpRuntimeCapabilities = {
controls: ["session/set_mode", "session/set_config_option", "session/status"],
};
// Outcome of the combined acpx health probe (version check then a `--help`
// run). On success the passing version check is kept so callers can report
// the detected version. On failure, `failure.kind` identifies which stage
// broke: the version check, the `--help` spawn, or an unexpected exception.
type AcpxHealthCheckResult =
  | {
      ok: true;
      versionCheck: Extract<AcpxVersionCheckResult, { ok: true }>;
    }
  | {
      ok: false;
      failure:
        | {
            kind: "version-check";
            versionCheck: Extract<AcpxVersionCheckResult, { ok: false }>;
          }
        | {
            kind: "help-check";
            // Full spawn result so callers can inspect stderr/exit code.
            result: Awaited<ReturnType<typeof spawnAndCollect>>;
          }
        | {
            kind: "exception";
            error: unknown;
          };
};
function formatPermissionModeGuidance(): string {
  // Hint surfaced when permissionMode config is missing or invalid.
  const guidance =
    "Configure plugins.entries.acpx.config.permissionMode to one of: approve-reads, approve-all, deny-all.";
  return guidance;
}
@ -165,35 +187,71 @@ export class AcpxRuntime implements AcpRuntime {
);
}
async probeAvailability(): Promise<void> {
const versionCheck = await checkAcpxVersion({
private async checkVersion(): Promise<AcpxVersionCheckResult> {
return await checkAcpxVersion({
command: this.config.command,
cwd: this.config.cwd,
expectedVersion: this.config.expectedVersion,
stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars,
spawnOptions: this.spawnCommandOptions,
});
}
private async runHelpCheck(): Promise<Awaited<ReturnType<typeof spawnAndCollect>>> {
return await spawnAndCollect(
{
command: this.config.command,
args: ["--help"],
cwd: this.config.cwd,
stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars,
},
this.spawnCommandOptions,
);
}
private async checkHealth(): Promise<AcpxHealthCheckResult> {
const versionCheck = await this.checkVersion();
if (!versionCheck.ok) {
this.healthy = false;
return;
return {
ok: false,
failure: {
kind: "version-check",
versionCheck,
},
};
}
try {
const result = await spawnAndCollect(
{
command: this.config.command,
args: ["--help"],
cwd: this.config.cwd,
stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars,
const result = await this.runHelpCheck();
if (result.error != null || (result.code ?? 0) !== 0) {
return {
ok: false,
failure: {
kind: "help-check",
result,
},
};
}
return {
ok: true,
versionCheck,
};
} catch (error) {
return {
ok: false,
failure: {
kind: "exception",
error,
},
this.spawnCommandOptions,
);
this.healthy = result.error == null && (result.code ?? 0) === 0;
} catch {
this.healthy = false;
};
}
}
async probeAvailability(): Promise<void> {
const result = await this.checkHealth();
this.healthy = result.ok;
}
async ensureSession(input: AcpRuntimeEnsureInput): Promise<AcpRuntimeHandle> {
const sessionName = asTrimmedString(input.sessionKey);
if (!sessionName) {
@ -494,14 +552,9 @@ export class AcpxRuntime implements AcpRuntime {
}
async doctor(): Promise<AcpRuntimeDoctorReport> {
const versionCheck = await checkAcpxVersion({
command: this.config.command,
cwd: this.config.cwd,
expectedVersion: this.config.expectedVersion,
stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars,
spawnOptions: this.spawnCommandOptions,
});
if (!versionCheck.ok) {
const result = await this.checkHealth();
if (!result.ok && result.failure.kind === "version-check") {
const { versionCheck } = result.failure;
this.healthy = false;
const details = [
versionCheck.expectedVersion ? `expected=${versionCheck.expectedVersion}` : null,
@ -516,20 +569,12 @@ export class AcpxRuntime implements AcpRuntime {
};
}
try {
const result = await spawnAndCollect(
{
command: this.config.command,
args: ["--help"],
cwd: this.config.cwd,
stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars,
},
this.spawnCommandOptions,
);
if (result.error) {
const spawnFailure = resolveSpawnFailure(result.error, this.config.cwd);
if (!result.ok && result.failure.kind === "help-check") {
const { result: helpResult } = result.failure;
this.healthy = false;
if (helpResult.error) {
const spawnFailure = resolveSpawnFailure(helpResult.error, this.config.cwd);
if (spawnFailure === "missing-command") {
this.healthy = false;
return {
ok: false,
code: "ACP_BACKEND_UNAVAILABLE",
@ -538,42 +583,47 @@ export class AcpxRuntime implements AcpRuntime {
};
}
if (spawnFailure === "missing-cwd") {
this.healthy = false;
return {
ok: false,
code: "ACP_BACKEND_UNAVAILABLE",
message: `ACP runtime working directory does not exist: ${this.config.cwd}`,
};
}
this.healthy = false;
return {
ok: false,
code: "ACP_BACKEND_UNAVAILABLE",
message: result.error.message,
details: [String(result.error)],
message: helpResult.error.message,
details: [String(helpResult.error)],
};
}
if ((result.code ?? 0) !== 0) {
this.healthy = false;
return {
ok: false,
code: "ACP_BACKEND_UNAVAILABLE",
message: result.stderr.trim() || `acpx exited with code ${result.code ?? "unknown"}`,
};
}
this.healthy = true;
return {
ok: true,
message: `acpx command available (${this.config.command}, version ${versionCheck.version}${this.config.expectedVersion ? `, expected ${this.config.expectedVersion}` : ""})`,
};
} catch (error) {
this.healthy = false;
return {
ok: false,
code: "ACP_BACKEND_UNAVAILABLE",
message: error instanceof Error ? error.message : String(error),
message:
helpResult.stderr.trim() || `acpx exited with code ${helpResult.code ?? "unknown"}`,
};
}
if (!result.ok) {
this.healthy = false;
const failure = result.failure;
return {
ok: false,
code: "ACP_BACKEND_UNAVAILABLE",
message:
failure.kind === "exception"
? failure.error instanceof Error
? failure.error.message
: String(failure.error)
: "acpx backend unavailable",
};
}
this.healthy = true;
return {
ok: true,
message: `acpx command available (${this.config.command}, version ${result.versionCheck.version}${this.config.expectedVersion ? `, expected ${this.config.expectedVersion}` : ""})`,
};
}
async cancel(input: { handle: AcpRuntimeHandle; reason?: string }): Promise<void> {

View File

@ -82,6 +82,15 @@ describe("downloadBlueBubblesAttachment", () => {
).rejects.toThrow("too large");
}
function mockSuccessfulAttachmentDownload(buffer = new Uint8Array([1])) {
  // Queue one successful fetch response whose body resolves to the given
  // bytes; the buffer is returned so callers can assert against it.
  const response = {
    ok: true,
    headers: new Headers(),
    arrayBuffer: () => Promise.resolve(buffer.buffer),
  };
  mockFetch.mockResolvedValueOnce(response);
  return buffer;
}
it("throws when guid is missing", async () => {
const attachment: BlueBubblesAttachment = {};
await expect(
@ -159,12 +168,7 @@ describe("downloadBlueBubblesAttachment", () => {
});
it("encodes guid in URL", async () => {
const mockBuffer = new Uint8Array([1]);
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
mockSuccessfulAttachmentDownload();
const attachment: BlueBubblesAttachment = { guid: "att/with/special chars" };
await downloadBlueBubblesAttachment(attachment, {
@ -244,12 +248,7 @@ describe("downloadBlueBubblesAttachment", () => {
});
it("resolves credentials from config when opts not provided", async () => {
const mockBuffer = new Uint8Array([1]);
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
mockSuccessfulAttachmentDownload();
const attachment: BlueBubblesAttachment = { guid: "att-config" };
const result = await downloadBlueBubblesAttachment(attachment, {
@ -270,12 +269,7 @@ describe("downloadBlueBubblesAttachment", () => {
});
it("passes ssrfPolicy with allowPrivateNetwork when config enables it", async () => {
const mockBuffer = new Uint8Array([1]);
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
mockSuccessfulAttachmentDownload();
const attachment: BlueBubblesAttachment = { guid: "att-ssrf" };
await downloadBlueBubblesAttachment(attachment, {
@ -295,12 +289,7 @@ describe("downloadBlueBubblesAttachment", () => {
});
it("auto-allowlists serverUrl hostname when allowPrivateNetwork is not set", async () => {
const mockBuffer = new Uint8Array([1]);
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
mockSuccessfulAttachmentDownload();
const attachment: BlueBubblesAttachment = { guid: "att-no-ssrf" };
await downloadBlueBubblesAttachment(attachment, {
@ -313,12 +302,7 @@ describe("downloadBlueBubblesAttachment", () => {
});
it("auto-allowlists private IP serverUrl hostname when allowPrivateNetwork is not set", async () => {
const mockBuffer = new Uint8Array([1]);
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
mockSuccessfulAttachmentDownload();
const attachment: BlueBubblesAttachment = { guid: "att-private-ip" };
await downloadBlueBubblesAttachment(attachment, {
@ -352,6 +336,14 @@ describe("sendBlueBubblesAttachment", () => {
return Buffer.from(body).toString("utf8");
}
function expectVoiceAttachmentBody() {
  // Decode the multipart body of the first fetch call and verify the
  // voice-memo flag field was included; return the text for further checks.
  const rawBody = mockFetch.mock.calls[0][1]?.body as Uint8Array;
  const text = decodeBody(rawBody);
  for (const fragment of ['name="isAudioMessage"', "true"]) {
    expect(text).toContain(fragment);
  }
  return text;
}
it("marks voice memos when asVoice is true and mp3 is provided", async () => {
mockFetch.mockResolvedValueOnce({
ok: true,
@ -367,10 +359,7 @@ describe("sendBlueBubblesAttachment", () => {
opts: { serverUrl: "http://localhost:1234", password: "test" },
});
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array;
const bodyText = decodeBody(body);
expect(bodyText).toContain('name="isAudioMessage"');
expect(bodyText).toContain("true");
const bodyText = expectVoiceAttachmentBody();
expect(bodyText).toContain('filename="voice.mp3"');
});
@ -389,8 +378,7 @@ describe("sendBlueBubblesAttachment", () => {
opts: { serverUrl: "http://localhost:1234", password: "test" },
});
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array;
const bodyText = decodeBody(body);
const bodyText = expectVoiceAttachmentBody();
expect(bodyText).toContain('filename="voice.mp3"');
expect(bodyText).toContain('name="voice.mp3"');
});

View File

@ -2,7 +2,7 @@ import crypto from "node:crypto";
import path from "node:path";
import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles";
import { resolveBlueBubblesServerAccount } from "./account-resolve.js";
import { postMultipartFormData } from "./multipart.js";
import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js";
import {
getCachedBlueBubblesPrivateApiStatus,
isBlueBubblesPrivateApiStatusEnabled,
@ -262,12 +262,7 @@ export async function sendBlueBubblesAttachment(params: {
timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads
});
if (!res.ok) {
const errorText = await res.text();
throw new Error(
`BlueBubbles attachment send failed (${res.status}): ${errorText || "unknown"}`,
);
}
await assertMultipartActionOk(res, "attachment send");
const responseBody = await res.text();
if (!responseBody) {

View File

@ -29,6 +29,11 @@ describe("chat", () => {
});
}
function mockTwoOkTextResponses() {
  // Queue two sequential successful empty-text responses (e.g. the paired
  // start/stop or add/remove requests in these tests).
  for (let i = 0; i < 2; i += 1) {
    mockOkTextResponse();
  }
}
async function expectCalledUrlIncludesPassword(params: {
password: string;
invoke: () => Promise<void>;
@ -198,15 +203,7 @@ describe("chat", () => {
});
it("uses POST for start and DELETE for stop", async () => {
mockFetch
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
})
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
});
mockTwoOkTextResponses();
await sendBlueBubblesTyping("iMessage;-;+15551234567", true, {
serverUrl: "http://localhost:1234",
@ -442,15 +439,7 @@ describe("chat", () => {
});
it("adds and removes participant using matching endpoint", async () => {
mockFetch
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
})
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
});
mockTwoOkTextResponses();
await addBlueBubblesParticipant("chat-guid", "+15551234567", {
serverUrl: "http://localhost:1234",

View File

@ -2,7 +2,7 @@ import crypto from "node:crypto";
import path from "node:path";
import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles";
import { resolveBlueBubblesServerAccount } from "./account-resolve.js";
import { postMultipartFormData } from "./multipart.js";
import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js";
import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js";
import { blueBubblesFetchWithTimeout, buildBlueBubblesApiUrl } from "./types.js";
@ -55,12 +55,7 @@ async function sendBlueBubblesChatEndpointRequest(params: {
{ method: params.method },
params.opts.timeoutMs,
);
if (!res.ok) {
const errorText = await res.text().catch(() => "");
throw new Error(
`BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`,
);
}
await assertMultipartActionOk(res, params.action);
}
async function sendPrivateApiJsonRequest(params: {
@ -86,12 +81,7 @@ async function sendPrivateApiJsonRequest(params: {
}
const res = await blueBubblesFetchWithTimeout(url, request, params.opts.timeoutMs);
if (!res.ok) {
const errorText = await res.text().catch(() => "");
throw new Error(
`BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`,
);
}
await assertMultipartActionOk(res, params.action);
}
export async function markBlueBubblesChatRead(
@ -329,8 +319,5 @@ export async function setGroupIconBlueBubbles(
timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads
});
if (!res.ok) {
const errorText = await res.text().catch(() => "");
throw new Error(`BlueBubbles setGroupIcon failed (${res.status}): ${errorText || "unknown"}`);
}
await assertMultipartActionOk(res, "setGroupIcon");
}

View File

@ -70,6 +70,70 @@ async function makeTempDir(): Promise<string> {
return dir;
}
async function makeTempFile(
  fileName: string,
  contents: string,
  dir?: string,
): Promise<{ dir: string; filePath: string }> {
  // Reuse the caller-provided directory when given; otherwise create a
  // fresh temp directory for the file.
  const targetDir = dir ?? (await makeTempDir());
  const filePath = path.join(targetDir, fileName);
  await fs.writeFile(filePath, contents, "utf8");
  return { dir: targetDir, filePath };
}
async function sendLocalMedia(params: {
  cfg: OpenClawConfig;
  mediaPath: string;
  accountId?: string;
}) {
  // Thin wrapper that pins the destination chat used by local-path tests.
  const { cfg, mediaPath, accountId } = params;
  return sendBlueBubblesMedia({
    cfg,
    to: "chat:123",
    accountId,
    mediaPath,
  });
}
async function expectRejectedLocalMedia(params: {
  cfg: OpenClawConfig;
  mediaPath: string;
  error: RegExp;
  accountId?: string;
}) {
  // The send must fail with the expected message and must never reach the
  // attachment-upload layer.
  const attempt = sendLocalMedia({
    cfg: params.cfg,
    mediaPath: params.mediaPath,
    accountId: params.accountId,
  });
  await expect(attempt).rejects.toThrow(params.error);
  expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
}
async function expectAllowedLocalMedia(params: {
  cfg: OpenClawConfig;
  mediaPath: string;
  expectedAttachment: Record<string, unknown>;
  accountId?: string;
  expectMimeDetection?: boolean;
}) {
  // A successful send returns the stub message id and forwards exactly one
  // attachment whose fields include the expected subset.
  const outcome = await sendLocalMedia({
    cfg: params.cfg,
    mediaPath: params.mediaPath,
    accountId: params.accountId,
  });
  expect(outcome).toEqual({ messageId: "msg-1" });
  expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
  const firstCallArg = sendBlueBubblesAttachmentMock.mock.calls[0]?.[0];
  expect(firstCallArg).toEqual(expect.objectContaining(params.expectedAttachment));
  if (params.expectMimeDetection) {
    // Only some scenarios exercise the MIME sniffing path.
    expect(runtimeMocks.detectMime).toHaveBeenCalled();
  }
}
beforeEach(() => {
const runtime = createMockRuntime();
runtimeMocks = runtime.mocks;
@ -110,57 +174,43 @@ describe("sendBlueBubblesMedia local-path hardening", () => {
const outsideFile = path.join(outsideDir, "outside.txt");
await fs.writeFile(outsideFile, "not allowed", "utf8");
await expect(
sendBlueBubblesMedia({
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
to: "chat:123",
mediaPath: outsideFile,
}),
).rejects.toThrow(/not under any configured mediaLocalRoots/i);
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
await expectRejectedLocalMedia({
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
mediaPath: outsideFile,
error: /not under any configured mediaLocalRoots/i,
});
});
it("allows local paths that are explicitly configured", async () => {
const allowedRoot = await makeTempDir();
const allowedFile = path.join(allowedRoot, "allowed.txt");
await fs.writeFile(allowedFile, "allowed", "utf8");
const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile(
"allowed.txt",
"allowed",
);
const result = await sendBlueBubblesMedia({
await expectAllowedLocalMedia({
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
to: "chat:123",
mediaPath: allowedFile,
});
expect(result).toEqual({ messageId: "msg-1" });
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
expect.objectContaining({
expectedAttachment: {
filename: "allowed.txt",
contentType: "text/plain",
}),
);
expect(runtimeMocks.detectMime).toHaveBeenCalled();
},
expectMimeDetection: true,
});
});
it("allows file:// media paths and file:// local roots", async () => {
const allowedRoot = await makeTempDir();
const allowedFile = path.join(allowedRoot, "allowed.txt");
await fs.writeFile(allowedFile, "allowed", "utf8");
const result = await sendBlueBubblesMedia({
cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }),
to: "chat:123",
mediaPath: pathToFileURL(allowedFile).toString(),
});
expect(result).toEqual({ messageId: "msg-1" });
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
expect.objectContaining({
filename: "allowed.txt",
}),
const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile(
"allowed.txt",
"allowed",
);
await expectAllowedLocalMedia({
cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }),
mediaPath: pathToFileURL(allowedFile).toString(),
expectedAttachment: {
filename: "allowed.txt",
},
});
});
it("uses account-specific mediaLocalRoots over top-level roots", async () => {
@ -213,15 +263,11 @@ describe("sendBlueBubblesMedia local-path hardening", () => {
return;
}
await expect(
sendBlueBubblesMedia({
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
to: "chat:123",
mediaPath: linkPath,
}),
).rejects.toThrow(/not under any configured mediaLocalRoots/i);
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
await expectRejectedLocalMedia({
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
mediaPath: linkPath,
error: /not under any configured mediaLocalRoots/i,
});
});
it("rejects relative mediaLocalRoots entries", async () => {

View File

@ -1,18 +1,24 @@
import { describe, expect, it } from "vitest";
import { normalizeWebhookMessage, normalizeWebhookReaction } from "./monitor-normalize.js";
function createFallbackDmPayload(overrides: Record<string, unknown> = {}) {
return {
guid: "msg-1",
isGroup: false,
isFromMe: false,
handle: null,
chatGuid: "iMessage;-;+15551234567",
...overrides,
};
}
describe("normalizeWebhookMessage", () => {
it("falls back to DM chatGuid handle when sender handle is missing", () => {
const result = normalizeWebhookMessage({
type: "new-message",
data: {
guid: "msg-1",
data: createFallbackDmPayload({
text: "hello",
isGroup: false,
isFromMe: false,
handle: null,
chatGuid: "iMessage;-;+15551234567",
},
}),
});
expect(result).not.toBeNull();
@ -78,15 +84,11 @@ describe("normalizeWebhookReaction", () => {
it("falls back to DM chatGuid handle when reaction sender handle is missing", () => {
const result = normalizeWebhookReaction({
type: "updated-message",
data: {
data: createFallbackDmPayload({
guid: "msg-2",
associatedMessageGuid: "p:0/msg-1",
associatedMessageType: 2000,
isGroup: false,
isFromMe: false,
handle: null,
chatGuid: "iMessage;-;+15551234567",
},
}),
});
expect(result).not.toBeNull();

View File

@ -582,6 +582,29 @@ export function parseTapbackText(params: {
return null;
}
const parseLeadingReactionAction = (
prefix: "reacted" | "removed",
defaultAction: "added" | "removed",
) => {
if (!lower.startsWith(prefix)) {
return null;
}
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint;
if (!emoji) {
return null;
}
const quotedText = extractQuotedTapbackText(trimmed);
if (params.requireQuoted && !quotedText) {
return null;
}
const fallback = trimmed.slice(prefix.length).trim();
return {
emoji,
action: params.actionHint ?? defaultAction,
quotedText: quotedText ?? fallback,
};
};
for (const [pattern, { emoji, action }] of TAPBACK_TEXT_MAP) {
if (lower.startsWith(pattern)) {
// Extract quoted text if present (e.g., 'Loved "hello"' -> "hello")
@ -599,30 +622,14 @@ export function parseTapbackText(params: {
}
}
if (lower.startsWith("reacted")) {
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint;
if (!emoji) {
return null;
}
const quotedText = extractQuotedTapbackText(trimmed);
if (params.requireQuoted && !quotedText) {
return null;
}
const fallback = trimmed.slice("reacted".length).trim();
return { emoji, action: params.actionHint ?? "added", quotedText: quotedText ?? fallback };
const reacted = parseLeadingReactionAction("reacted", "added");
if (reacted) {
return reacted;
}
if (lower.startsWith("removed")) {
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint;
if (!emoji) {
return null;
}
const quotedText = extractQuotedTapbackText(trimmed);
if (params.requireQuoted && !quotedText) {
return null;
}
const fallback = trimmed.slice("removed".length).trim();
return { emoji, action: params.actionHint ?? "removed", quotedText: quotedText ?? fallback };
const removed = parseLeadingReactionAction("removed", "removed");
if (removed) {
return removed;
}
return null;
}

View File

@ -302,65 +302,102 @@ describe("BlueBubbles webhook monitor", () => {
};
}
describe("webhook parsing + auth handling", () => {
it("rejects non-POST requests", async () => {
const account = createMockAccount();
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
async function dispatchWebhook(req: IncomingMessage) {
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
return { handled, res };
}
unregister = registerBlueBubblesWebhookTarget({
function createWebhookRequestForTest(params?: {
method?: string;
url?: string;
body?: unknown;
headers?: Record<string, string>;
remoteAddress?: string;
}) {
const req = createMockRequest(
params?.method ?? "POST",
params?.url ?? "/bluebubbles-webhook",
params?.body ?? {},
params?.headers,
);
if (params?.remoteAddress) {
setRequestRemoteAddress(req, params.remoteAddress);
}
return req;
}
function createHangingWebhookRequest(url = "/bluebubbles-webhook?password=test-password") {
const req = new EventEmitter() as IncomingMessage;
const destroyMock = vi.fn();
req.method = "POST";
req.url = url;
req.headers = {};
req.destroy = destroyMock as unknown as IncomingMessage["destroy"];
setRequestRemoteAddress(req, "127.0.0.1");
return { req, destroyMock };
}
function registerWebhookTargets(
params: Array<{
account: ResolvedBlueBubblesAccount;
statusSink?: (event: unknown) => void;
}>,
) {
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const unregisterFns = params.map(({ account, statusSink }) =>
registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
statusSink,
}),
);
const req = createMockRequest("GET", "/bluebubbles-webhook", {});
const res = createMockResponse();
unregister = () => {
for (const unregisterFn of unregisterFns) {
unregisterFn();
}
};
}
const handled = await handleBlueBubblesWebhookRequest(req, res);
async function expectWebhookStatus(
req: IncomingMessage,
expectedStatus: number,
expectedBody?: string,
) {
const { handled, res } = await dispatchWebhook(req);
expect(handled).toBe(true);
expect(res.statusCode).toBe(expectedStatus);
if (expectedBody !== undefined) {
expect(res.body).toBe(expectedBody);
}
return res;
}
expect(handled).toBe(true);
expect(res.statusCode).toBe(405);
describe("webhook parsing + auth handling", () => {
it("rejects non-POST requests", async () => {
setupWebhookTarget();
const req = createWebhookRequestForTest({ method: "GET" });
await expectWebhookStatus(req, 405);
});
it("accepts POST requests with valid JSON payload", async () => {
setupWebhookTarget();
const payload = createNewMessagePayload({ date: Date.now() });
const req = createMockRequest("POST", "/bluebubbles-webhook", payload);
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
expect(res.body).toBe("ok");
const req = createWebhookRequestForTest({ body: payload });
await expectWebhookStatus(req, 200, "ok");
});
it("rejects requests with invalid JSON", async () => {
const account = createMockAccount();
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const req = createMockRequest("POST", "/bluebubbles-webhook", "invalid json {{");
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(400);
setupWebhookTarget();
const req = createWebhookRequestForTest({ body: "invalid json {{" });
await expectWebhookStatus(req, 400);
});
it("accepts URL-encoded payload wrappers", async () => {
@ -369,42 +406,17 @@ describe("BlueBubbles webhook monitor", () => {
const encodedBody = new URLSearchParams({
payload: JSON.stringify(payload),
}).toString();
const req = createMockRequest("POST", "/bluebubbles-webhook", encodedBody);
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
expect(res.body).toBe("ok");
const req = createWebhookRequestForTest({ body: encodedBody });
await expectWebhookStatus(req, 200, "ok");
});
it("returns 408 when request body times out (Slow-Loris protection)", async () => {
vi.useFakeTimers();
try {
const account = createMockAccount();
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
setupWebhookTarget();
// Create a request that never sends data or ends (simulates slow-loris)
const req = new EventEmitter() as IncomingMessage;
req.method = "POST";
req.url = "/bluebubbles-webhook?password=test-password";
req.headers = {};
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress: "127.0.0.1",
};
req.destroy = vi.fn();
const { req, destroyMock } = createHangingWebhookRequest();
const res = createMockResponse();
@ -416,7 +428,7 @@ describe("BlueBubbles webhook monitor", () => {
const handled = await handledPromise;
expect(handled).toBe(true);
expect(res.statusCode).toBe(408);
expect(req.destroy).toHaveBeenCalled();
expect(destroyMock).toHaveBeenCalled();
} finally {
vi.useRealTimers();
}
@ -424,140 +436,62 @@ describe("BlueBubbles webhook monitor", () => {
it("rejects unauthorized requests before reading the body", async () => {
const account = createMockAccount({ password: "secret-token" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const req = new EventEmitter() as IncomingMessage;
req.method = "POST";
req.url = "/bluebubbles-webhook?password=wrong-token";
req.headers = {};
setupWebhookTarget({ account });
const { req } = createHangingWebhookRequest("/bluebubbles-webhook?password=wrong-token");
const onSpy = vi.spyOn(req, "on");
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress: "127.0.0.1",
};
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
await expectWebhookStatus(req, 401);
expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function));
});
it("authenticates via password query parameter", async () => {
const account = createMockAccount({ password: "secret-token" });
// Mock non-localhost request
const req = createMockRequest(
"POST",
"/bluebubbles-webhook?password=secret-token",
createNewMessagePayload(),
);
setRequestRemoteAddress(req, "192.168.1.100");
setupWebhookTarget({ account });
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
const req = createWebhookRequestForTest({
url: "/bluebubbles-webhook?password=secret-token",
body: createNewMessagePayload(),
remoteAddress: "192.168.1.100",
});
await expectWebhookStatus(req, 200);
});
it("authenticates via x-password header", async () => {
const account = createMockAccount({ password: "secret-token" });
const req = createMockRequest(
"POST",
"/bluebubbles-webhook",
createNewMessagePayload(),
{ "x-password": "secret-token" }, // pragma: allowlist secret
);
setRequestRemoteAddress(req, "192.168.1.100");
setupWebhookTarget({ account });
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
const req = createWebhookRequestForTest({
body: createNewMessagePayload(),
headers: { "x-password": "secret-token" }, // pragma: allowlist secret
remoteAddress: "192.168.1.100",
});
await expectWebhookStatus(req, 200);
});
it("rejects unauthorized requests with wrong password", async () => {
const account = createMockAccount({ password: "secret-token" });
const req = createMockRequest(
"POST",
"/bluebubbles-webhook?password=wrong-token",
createNewMessagePayload(),
);
setRequestRemoteAddress(req, "192.168.1.100");
setupWebhookTarget({ account });
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
const req = createWebhookRequestForTest({
url: "/bluebubbles-webhook?password=wrong-token",
body: createNewMessagePayload(),
remoteAddress: "192.168.1.100",
});
await expectWebhookStatus(req, 401);
});
it("rejects ambiguous routing when multiple targets match the same password", async () => {
const accountA = createMockAccount({ password: "secret-token" });
const accountB = createMockAccount({ password: "secret-token" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const sinkA = vi.fn();
const sinkB = vi.fn();
registerWebhookTargets([
{ account: accountA, statusSink: sinkA },
{ account: accountB, statusSink: sinkB },
]);
const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", {
type: "new-message",
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
});
(req as unknown as { socket: { remoteAddress: string } }).socket = {
const req = createWebhookRequestForTest({
url: "/bluebubbles-webhook?password=secret-token",
body: createNewMessagePayload(),
remoteAddress: "192.168.1.100",
};
const unregisterA = registerBlueBubblesWebhookTarget({
account: accountA,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkA,
});
const unregisterB = registerBlueBubblesWebhookTarget({
account: accountB,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkB,
});
unregister = () => {
unregisterA();
unregisterB();
};
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
await expectWebhookStatus(req, 401);
expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).not.toHaveBeenCalled();
});
@ -565,107 +499,38 @@ describe("BlueBubbles webhook monitor", () => {
it("ignores targets without passwords when a password-authenticated target matches", async () => {
const accountStrict = createMockAccount({ password: "secret-token" });
const accountWithoutPassword = createMockAccount({ password: undefined });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const sinkStrict = vi.fn();
const sinkWithoutPassword = vi.fn();
registerWebhookTargets([
{ account: accountStrict, statusSink: sinkStrict },
{ account: accountWithoutPassword, statusSink: sinkWithoutPassword },
]);
const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", {
type: "new-message",
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
});
(req as unknown as { socket: { remoteAddress: string } }).socket = {
const req = createWebhookRequestForTest({
url: "/bluebubbles-webhook?password=secret-token",
body: createNewMessagePayload(),
remoteAddress: "192.168.1.100",
};
const unregisterStrict = registerBlueBubblesWebhookTarget({
account: accountStrict,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkStrict,
});
const unregisterNoPassword = registerBlueBubblesWebhookTarget({
account: accountWithoutPassword,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkWithoutPassword,
});
unregister = () => {
unregisterStrict();
unregisterNoPassword();
};
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
await expectWebhookStatus(req, 200);
expect(sinkStrict).toHaveBeenCalledTimes(1);
expect(sinkWithoutPassword).not.toHaveBeenCalled();
});
it("requires authentication for loopback requests when password is configured", async () => {
const account = createMockAccount({ password: "secret-token" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
setupWebhookTarget({ account });
for (const remoteAddress of ["127.0.0.1", "::1", "::ffff:127.0.0.1"]) {
const req = createMockRequest("POST", "/bluebubbles-webhook", {
type: "new-message",
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
});
(req as unknown as { socket: { remoteAddress: string } }).socket = {
const req = createWebhookRequestForTest({
body: createNewMessagePayload(),
remoteAddress,
};
const loopbackUnregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
loopbackUnregister();
await expectWebhookStatus(req, 401);
}
});
it("rejects targets without passwords for loopback and proxied-looking requests", async () => {
const account = createMockAccount({ password: undefined });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
setupWebhookTarget({ account });
const headerVariants: Record<string, string>[] = [
{ host: "localhost" },
@ -673,28 +538,12 @@ describe("BlueBubbles webhook monitor", () => {
{ host: "localhost", forwarded: "for=203.0.113.10;proto=https;host=example.com" },
];
for (const headers of headerVariants) {
const req = createMockRequest(
"POST",
"/bluebubbles-webhook",
{
type: "new-message",
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
},
const req = createWebhookRequestForTest({
body: createNewMessagePayload(),
headers,
);
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress: "127.0.0.1",
};
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
});
await expectWebhookStatus(req, 401);
}
});

View File

@ -30,3 +30,11 @@ export async function postMultipartFormData(params: {
params.timeoutMs,
);
}
export async function assertMultipartActionOk(response: Response, action: string): Promise<void> {
if (response.ok) {
return;
}
const errorText = await response.text().catch(() => "");
throw new Error(`BlueBubbles ${action} failed (${response.status}): ${errorText || "unknown"}`);
}

View File

@ -19,7 +19,7 @@ describe("reactions", () => {
});
describe("sendBlueBubblesReaction", () => {
async function expectRemovedReaction(emoji: string) {
async function expectRemovedReaction(emoji: string, expectedReaction = "-love") {
mockFetch.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
@ -37,7 +37,7 @@ describe("reactions", () => {
});
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
expect(body.reaction).toBe("-love");
expect(body.reaction).toBe(expectedReaction);
}
it("throws when chatGuid is empty", async () => {
@ -327,45 +327,11 @@ describe("reactions", () => {
describe("reaction removal aliases", () => {
it("handles emoji-based removal", async () => {
mockFetch.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
});
await sendBlueBubblesReaction({
chatGuid: "chat-123",
messageGuid: "msg-123",
emoji: "👍",
remove: true,
opts: {
serverUrl: "http://localhost:1234",
password: "test",
},
});
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
expect(body.reaction).toBe("-like");
await expectRemovedReaction("👍", "-like");
});
it("handles text alias removal", async () => {
mockFetch.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
});
await sendBlueBubblesReaction({
chatGuid: "chat-123",
messageGuid: "msg-123",
emoji: "haha",
remove: true,
opts: {
serverUrl: "http://localhost:1234",
password: "test",
},
});
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
expect(body.reaction).toBe("-laugh");
await expectRemovedReaction("haha", "-laugh");
});
});
});

View File

@ -108,13 +108,21 @@ function resolveScheme(
return cfg.gateway?.tls?.enabled === true ? "wss" : "ws";
}
function isPrivateIPv4(address: string): boolean {
function parseIPv4Octets(address: string): [number, number, number, number] | null {
const parts = address.split(".");
if (parts.length != 4) {
return false;
if (parts.length !== 4) {
return null;
}
const octets = parts.map((part) => Number.parseInt(part, 10));
if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) {
return null;
}
return octets as [number, number, number, number];
}
function isPrivateIPv4(address: string): boolean {
const octets = parseIPv4Octets(address);
if (!octets) {
return false;
}
const [a, b] = octets;
@ -131,12 +139,8 @@ function isPrivateIPv4(address: string): boolean {
}
function isTailnetIPv4(address: string): boolean {
const parts = address.split(".");
if (parts.length !== 4) {
return false;
}
const octets = parts.map((part) => Number.parseInt(part, 10));
if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) {
const octets = parseIPv4Octets(address);
if (!octets) {
return false;
}
const [a, b] = octets;

View File

@ -8,7 +8,7 @@
"build:viewer": "bun build src/viewer-client.ts --target browser --format esm --minify --outfile assets/viewer-runtime.js"
},
"dependencies": {
"@pierre/diffs": "1.0.11",
"@pierre/diffs": "1.1.0",
"@sinclair/typebox": "0.34.48",
"playwright-core": "1.58.2"
},

View File

@ -9,6 +9,19 @@ describe("createDiffsHttpHandler", () => {
let store: DiffArtifactStore;
let cleanupRootDir: () => Promise<void>;
async function handleLocalGet(url: string) {
const handler = createDiffsHttpHandler({ store });
const res = createMockServerResponse();
const handled = await handler(
localReq({
method: "GET",
url,
}),
res,
);
return { handled, res };
}
beforeEach(async () => {
({ store, cleanup: cleanupRootDir } = await createDiffStoreHarness("openclaw-diffs-http-"));
});
@ -19,16 +32,7 @@ describe("createDiffsHttpHandler", () => {
it("serves a stored diff document", async () => {
const artifact = await createViewerArtifact(store);
const handler = createDiffsHttpHandler({ store });
const res = createMockServerResponse();
const handled = await handler(
localReq({
method: "GET",
url: artifact.viewerPath,
}),
res,
);
const { handled, res } = await handleLocalGet(artifact.viewerPath);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
@ -38,15 +42,8 @@ describe("createDiffsHttpHandler", () => {
it("rejects invalid tokens", async () => {
const artifact = await createViewerArtifact(store);
const handler = createDiffsHttpHandler({ store });
const res = createMockServerResponse();
const handled = await handler(
localReq({
method: "GET",
url: artifact.viewerPath.replace(artifact.token, "bad-token"),
}),
res,
const { handled, res } = await handleLocalGet(
artifact.viewerPath.replace(artifact.token, "bad-token"),
);
expect(handled).toBe(true);

View File

@ -135,9 +135,7 @@ describe("diffs tool", () => {
mode: "file",
});
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
expect((result?.details as Record<string, unknown>).mode).toBe("file");
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
expectArtifactOnlyFileResult(screenshotter, result);
});
it("honors ttlSeconds for artifact-only file output", async () => {
@ -227,9 +225,7 @@ describe("diffs tool", () => {
after: "two\n",
});
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
expect((result?.details as Record<string, unknown>).mode).toBe("file");
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
expectArtifactOnlyFileResult(screenshotter, result);
});
it("falls back to view output when both mode cannot render an image", async () => {
@ -434,6 +430,15 @@ function createToolWithScreenshotter(
});
}
function expectArtifactOnlyFileResult(
screenshotter: DiffScreenshotter,
result: { details?: unknown } | null | undefined,
) {
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
expect((result?.details as Record<string, unknown>).mode).toBe("file");
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
}
function createPngScreenshotter(
params: {
assertHtml?: (html: string) => void;

View File

@ -75,6 +75,27 @@ function getRequiredHandler(
return handler;
}
function resolveSubagentDeliveryTargetForTest(requesterOrigin: {
channel: string;
accountId: string;
to: string;
threadId?: string;
}) {
const handlers = registerHandlersForTest();
const handler = getRequiredHandler(handlers, "subagent_delivery_target");
return handler(
{
childSessionKey: "agent:main:subagent:child",
requesterSessionKey: "agent:main:main",
requesterOrigin,
childRunId: "run-1",
spawnMode: "session",
expectsCompletionMessage: true,
},
{},
);
}
function createSpawnEvent(overrides?: {
childSessionKey?: string;
agentId?: string;
@ -324,25 +345,12 @@ describe("discord subagent hook handlers", () => {
hookMocks.listThreadBindingsBySessionKey.mockReturnValueOnce([
{ accountId: "work", threadId: "777" },
]);
const handlers = registerHandlersForTest();
const handler = getRequiredHandler(handlers, "subagent_delivery_target");
const result = handler(
{
childSessionKey: "agent:main:subagent:child",
requesterSessionKey: "agent:main:main",
requesterOrigin: {
channel: "discord",
accountId: "work",
to: "channel:123",
threadId: "777",
},
childRunId: "run-1",
spawnMode: "session",
expectsCompletionMessage: true,
},
{},
);
const result = resolveSubagentDeliveryTargetForTest({
channel: "discord",
accountId: "work",
to: "channel:123",
threadId: "777",
});
expect(hookMocks.listThreadBindingsBySessionKey).toHaveBeenCalledWith({
targetSessionKey: "agent:main:subagent:child",
@ -364,24 +372,11 @@ describe("discord subagent hook handlers", () => {
{ accountId: "work", threadId: "777" },
{ accountId: "work", threadId: "888" },
]);
const handlers = registerHandlersForTest();
const handler = getRequiredHandler(handlers, "subagent_delivery_target");
const result = handler(
{
childSessionKey: "agent:main:subagent:child",
requesterSessionKey: "agent:main:main",
requesterOrigin: {
channel: "discord",
accountId: "work",
to: "channel:123",
},
childRunId: "run-1",
spawnMode: "session",
expectsCompletionMessage: true,
},
{},
);
const result = resolveSubagentDeliveryTargetForTest({
channel: "discord",
accountId: "work",
to: "channel:123",
});
expect(result).toBeUndefined();
});

View File

@ -9,6 +9,23 @@ import type { FeishuConfig } from "./types.js";
const asConfig = (value: Partial<FeishuConfig>) => value as FeishuConfig;
function makeDefaultAndRouterAccounts() {
return {
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
};
}
function expectExplicitDefaultAccountSelection(
account: ReturnType<typeof resolveFeishuAccount>,
appId: string,
) {
expect(account.accountId).toBe("router-d");
expect(account.selectionSource).toBe("explicit-default");
expect(account.configured).toBe(true);
expect(account.appId).toBe(appId);
}
function withEnvVar(key: string, value: string | undefined, run: () => void) {
const prev = process.env[key];
if (value === undefined) {
@ -44,10 +61,7 @@ describe("resolveDefaultFeishuAccountId", () => {
channels: {
feishu: {
defaultAccount: "router-d",
accounts: {
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
},
accounts: makeDefaultAndRouterAccounts(),
},
},
};
@ -278,10 +292,7 @@ describe("resolveFeishuAccount", () => {
};
const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined });
expect(account.accountId).toBe("router-d");
expect(account.selectionSource).toBe("explicit-default");
expect(account.configured).toBe(true);
expect(account.appId).toBe("top_level_app");
expectExplicitDefaultAccountSelection(account, "top_level_app");
});
it("uses configured default account when accountId is omitted", () => {
@ -298,10 +309,7 @@ describe("resolveFeishuAccount", () => {
};
const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined });
expect(account.accountId).toBe("router-d");
expect(account.selectionSource).toBe("explicit-default");
expect(account.configured).toBe(true);
expect(account.appId).toBe("cli_router");
expectExplicitDefaultAccountSelection(account, "cli_router");
});
it("keeps explicit accountId selection", () => {
@ -309,10 +317,7 @@ describe("resolveFeishuAccount", () => {
channels: {
feishu: {
defaultAccount: "router-d",
accounts: {
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
},
accounts: makeDefaultAndRouterAccounts(),
},
},
};

View File

@ -1,6 +1,16 @@
import { describe, expect, it } from "vitest";
import { FeishuConfigSchema, FeishuGroupSchema } from "./config-schema.js";
function expectSchemaIssue(
result: ReturnType<typeof FeishuConfigSchema.safeParse>,
issuePath: string,
) {
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues.some((issue) => issue.path.join(".") === issuePath)).toBe(true);
}
}
describe("FeishuConfigSchema webhook validation", () => {
it("applies top-level defaults", () => {
const result = FeishuConfigSchema.parse({});
@ -39,12 +49,7 @@ describe("FeishuConfigSchema webhook validation", () => {
appSecret: "secret_top", // pragma: allowlist secret
});
expect(result.success).toBe(false);
if (!result.success) {
expect(
result.error.issues.some((issue) => issue.path.join(".") === "verificationToken"),
).toBe(true);
}
expectSchemaIssue(result, "verificationToken");
});
it("rejects top-level webhook mode without encryptKey", () => {
@ -55,10 +60,7 @@ describe("FeishuConfigSchema webhook validation", () => {
appSecret: "secret_top", // pragma: allowlist secret
});
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues.some((issue) => issue.path.join(".") === "encryptKey")).toBe(true);
}
expectSchemaIssue(result, "encryptKey");
});
it("accepts top-level webhook mode with verificationToken and encryptKey", () => {
@ -84,14 +86,7 @@ describe("FeishuConfigSchema webhook validation", () => {
},
});
expect(result.success).toBe(false);
if (!result.success) {
expect(
result.error.issues.some(
(issue) => issue.path.join(".") === "accounts.main.verificationToken",
),
).toBe(true);
}
expectSchemaIssue(result, "accounts.main.verificationToken");
});
it("rejects account webhook mode without encryptKey", () => {
@ -106,12 +101,7 @@ describe("FeishuConfigSchema webhook validation", () => {
},
});
expect(result.success).toBe(false);
if (!result.success) {
expect(
result.error.issues.some((issue) => issue.path.join(".") === "accounts.main.encryptKey"),
).toBe(true);
}
expectSchemaIssue(result, "accounts.main.encryptKey");
});
it("accepts account webhook mode inheriting top-level verificationToken and encryptKey", () => {

View File

@ -64,18 +64,21 @@ function expectMediaTimeoutClientConfigured(): void {
);
}
/**
 * Point resolveFeishuAccountMock at a fully-configured "main" account so
 * each test starts from the same resolved-account baseline.
 */
function mockResolvedFeishuAccount() {
  const resolvedMainAccount = {
    configured: true,
    accountId: "main",
    config: {},
    appId: "app_id",
    appSecret: "app_secret",
    domain: "feishu",
  };
  resolveFeishuAccountMock.mockReturnValue(resolvedMainAccount);
}
describe("sendMediaFeishu msg_type routing", () => {
beforeEach(() => {
vi.clearAllMocks();
resolveFeishuAccountMock.mockReturnValue({
configured: true,
accountId: "main",
config: {},
appId: "app_id",
appSecret: "app_secret",
domain: "feishu",
});
mockResolvedFeishuAccount();
normalizeFeishuTargetMock.mockReturnValue("ou_target");
resolveReceiveIdTypeMock.mockReturnValue("open_id");
@ -381,7 +384,7 @@ describe("sendMediaFeishu msg_type routing", () => {
expect(messageResourceGetMock).not.toHaveBeenCalled();
});
it("encodes Chinese filenames for file uploads", async () => {
it("preserves Chinese filenames for file uploads", async () => {
await sendMediaFeishu({
cfg: {} as any,
to: "user:ou_target",
@ -390,8 +393,7 @@ describe("sendMediaFeishu msg_type routing", () => {
});
const createCall = fileCreateMock.mock.calls[0][0];
expect(createCall.data.file_name).not.toBe("测试文档.pdf");
expect(createCall.data.file_name).toBe(encodeURIComponent("测试文档") + ".pdf");
expect(createCall.data.file_name).toBe("测试文档.pdf");
});
it("preserves ASCII filenames unchanged for file uploads", async () => {
@ -406,7 +408,7 @@ describe("sendMediaFeishu msg_type routing", () => {
expect(createCall.data.file_name).toBe("report-2026.pdf");
});
it("encodes special characters (em-dash, full-width brackets) in filenames", async () => {
it("preserves special Unicode characters (em-dash, full-width brackets) in filenames", async () => {
await sendMediaFeishu({
cfg: {} as any,
to: "user:ou_target",
@ -415,9 +417,7 @@ describe("sendMediaFeishu msg_type routing", () => {
});
const createCall = fileCreateMock.mock.calls[0][0];
expect(createCall.data.file_name).toMatch(/\.md$/);
expect(createCall.data.file_name).not.toContain("—");
expect(createCall.data.file_name).not.toContain("");
expect(createCall.data.file_name).toBe("报告—详情2026.md");
});
});
@ -427,71 +427,48 @@ describe("sanitizeFileNameForUpload", () => {
expect(sanitizeFileNameForUpload("my-file_v2.txt")).toBe("my-file_v2.txt");
});
it("encodes Chinese characters in basename, preserves extension", () => {
const result = sanitizeFileNameForUpload("测试文件.md");
expect(result).toBe(encodeURIComponent("测试文件") + ".md");
expect(result).toMatch(/\.md$/);
it("preserves Chinese characters", () => {
expect(sanitizeFileNameForUpload("测试文件.md")).toBe("测试文件.md");
expect(sanitizeFileNameForUpload("武汉15座山登山信息汇总.csv")).toBe(
"武汉15座山登山信息汇总.csv",
);
});
it("encodes em-dash and full-width brackets", () => {
const result = sanitizeFileNameForUpload("文件—说明v2.pdf");
expect(result).toMatch(/\.pdf$/);
expect(result).not.toContain("—");
expect(result).not.toContain("");
expect(result).not.toContain("");
it("preserves em-dash and full-width brackets", () => {
expect(sanitizeFileNameForUpload("文件—说明v2.pdf")).toBe("文件—说明v2.pdf");
});
it("encodes single quotes and parentheses per RFC 5987", () => {
const result = sanitizeFileNameForUpload("文件'(test).txt");
expect(result).toContain("%27");
expect(result).toContain("%28");
expect(result).toContain("%29");
expect(result).toMatch(/\.txt$/);
it("preserves single quotes and parentheses", () => {
expect(sanitizeFileNameForUpload("文件'(test).txt")).toBe("文件'(test).txt");
});
it("handles filenames without extension", () => {
const result = sanitizeFileNameForUpload("测试文件");
expect(result).toBe(encodeURIComponent("测试文件"));
it("preserves filenames without extension", () => {
expect(sanitizeFileNameForUpload("测试文件")).toBe("测试文件");
});
it("handles mixed ASCII and non-ASCII", () => {
const result = sanitizeFileNameForUpload("Report_报告_2026.xlsx");
expect(result).toMatch(/\.xlsx$/);
expect(result).not.toContain("报告");
it("preserves mixed ASCII and non-ASCII", () => {
expect(sanitizeFileNameForUpload("Report_报告_2026.xlsx")).toBe("Report_报告_2026.xlsx");
});
it("encodes non-ASCII extensions", () => {
const result = sanitizeFileNameForUpload("报告.文档");
expect(result).toContain("%E6%96%87%E6%A1%A3");
expect(result).not.toContain("文档");
it("preserves emoji filenames", () => {
expect(sanitizeFileNameForUpload("report_😀.txt")).toBe("report_😀.txt");
});
it("encodes emoji filenames", () => {
const result = sanitizeFileNameForUpload("report_😀.txt");
expect(result).toContain("%F0%9F%98%80");
expect(result).toMatch(/\.txt$/);
it("strips control characters", () => {
expect(sanitizeFileNameForUpload("bad\x00file.txt")).toBe("bad_file.txt");
expect(sanitizeFileNameForUpload("inject\r\nheader.txt")).toBe("inject__header.txt");
});
it("encodes mixed ASCII and non-ASCII extensions", () => {
const result = sanitizeFileNameForUpload("notes_总结.v测试");
expect(result).toContain("notes_");
expect(result).toContain("%E6%B5%8B%E8%AF%95");
expect(result).not.toContain("测试");
it("strips quotes and backslashes to prevent header injection", () => {
expect(sanitizeFileNameForUpload('file"name.txt')).toBe("file_name.txt");
expect(sanitizeFileNameForUpload("file\\name.txt")).toBe("file_name.txt");
});
});
describe("downloadMessageResourceFeishu", () => {
beforeEach(() => {
vi.clearAllMocks();
resolveFeishuAccountMock.mockReturnValue({
configured: true,
accountId: "main",
config: {},
appId: "app_id",
appSecret: "app_secret",
domain: "feishu",
});
mockResolvedFeishuAccount();
createFeishuClientMock.mockReturnValue({
im: {

View File

@ -22,6 +22,45 @@ export type DownloadMessageResourceResult = {
fileName?: string;
};
/**
 * Resolve a Feishu account from config and build an API client with the
 * media HTTP timeout applied.
 *
 * @throws Error when the resolved account is not configured.
 */
function createConfiguredFeishuMediaClient(params: { cfg: ClawdbotConfig; accountId?: string }): {
  account: ReturnType<typeof resolveFeishuAccount>;
  client: ReturnType<typeof createFeishuClient>;
} {
  const resolved = resolveFeishuAccount({ cfg: params.cfg, accountId: params.accountId });
  if (!resolved.configured) {
    throw new Error(`Feishu account "${resolved.accountId}" not configured`);
  }
  const client = createFeishuClient({
    ...resolved,
    httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
  });
  return { account: resolved, client };
}
/**
 * Extract an upload key (image_key / file_key) from a Feishu SDK upload
 * response. SDK v1.30+ returns the payload directly without a `code`
 * wrapper on success; older shapes nest it under `data` and report
 * failures as `{ code, msg }`.
 *
 * @throws Error when the response carries a non-zero code or no key.
 */
function extractFeishuUploadKey(
  response: unknown,
  params: {
    key: "image_key" | "file_key";
    errorPrefix: string;
  },
): string {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type
  const raw = response as any;
  const { key, errorPrefix } = params;
  if (raw.code !== undefined && raw.code !== 0) {
    const detail = raw.msg || `code ${raw.code}`;
    throw new Error(`${errorPrefix}: ${detail}`);
  }
  const extracted = raw[key] ?? raw.data?.[key];
  if (!extracted) {
    throw new Error(`${errorPrefix}: no ${key} returned`);
  }
  return extracted;
}
async function readFeishuResponseBuffer(params: {
response: unknown;
tmpDirPrefix: string;
@ -94,15 +133,7 @@ export async function downloadImageFeishu(params: {
if (!normalizedImageKey) {
throw new Error("Feishu image download failed: invalid image_key");
}
const account = resolveFeishuAccount({ cfg, accountId });
if (!account.configured) {
throw new Error(`Feishu account "${account.accountId}" not configured`);
}
const client = createFeishuClient({
...account,
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
});
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
const response = await client.im.image.get({
path: { image_key: normalizedImageKey },
@ -132,15 +163,7 @@ export async function downloadMessageResourceFeishu(params: {
if (!normalizedFileKey) {
throw new Error("Feishu message resource download failed: invalid file_key");
}
const account = resolveFeishuAccount({ cfg, accountId });
if (!account.configured) {
throw new Error(`Feishu account "${account.accountId}" not configured`);
}
const client = createFeishuClient({
...account,
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
});
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
const response = await client.im.messageResource.get({
path: { message_id: messageId, file_key: normalizedFileKey },
@ -179,15 +202,7 @@ export async function uploadImageFeishu(params: {
accountId?: string;
}): Promise<UploadImageResult> {
const { cfg, image, imageType = "message", accountId } = params;
const account = resolveFeishuAccount({ cfg, accountId });
if (!account.configured) {
throw new Error(`Feishu account "${account.accountId}" not configured`);
}
const client = createFeishuClient({
...account,
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
});
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
// SDK accepts Buffer directly or fs.ReadStream for file paths
// Using Readable.from(buffer) causes issues with form-data library
@ -202,38 +217,26 @@ export async function uploadImageFeishu(params: {
},
});
// SDK v1.30+ returns data directly without code wrapper on success
// On error, it throws or returns { code, msg }
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type
const responseAny = response as any;
if (responseAny.code !== undefined && responseAny.code !== 0) {
throw new Error(`Feishu image upload failed: ${responseAny.msg || `code ${responseAny.code}`}`);
}
const imageKey = responseAny.image_key ?? responseAny.data?.image_key;
if (!imageKey) {
throw new Error("Feishu image upload failed: no image_key returned");
}
return { imageKey };
return {
imageKey: extractFeishuUploadKey(response, {
key: "image_key",
errorPrefix: "Feishu image upload failed",
}),
};
}
/**
* Encode a filename for safe use in Feishu multipart/form-data uploads.
* Non-ASCII characters (Chinese, em-dash, full-width brackets, etc.) cause
* the upload to silently fail when passed raw through the SDK's form-data
* serialization. RFC 5987 percent-encoding keeps headers 7-bit clean while
* Feishu's server decodes and preserves the original display name.
* Sanitize a filename for safe use in Feishu multipart/form-data uploads.
* Strips control characters and multipart-injection vectors (CWE-93) while
* preserving the original UTF-8 display name (Chinese, emoji, etc.).
*
* Previous versions percent-encoded non-ASCII characters, but the Feishu
* `im.file.create` API uses `file_name` as a literal display name it does
* NOT decode percent-encoding so encoded filenames appeared as garbled text
* in chat (regression in v2026.3.2).
*/
export function sanitizeFileNameForUpload(fileName: string): string {
  // Merge residue previously left the old RFC 5987 percent-encoding path
  // here, which made the replace below unreachable for non-ASCII names and
  // reintroduced the garbled-display regression the doc comment above
  // describes. Only strip injection vectors; keep the display name intact.
  //
  // \x00-\x1F already covers \r and \n; \x7F is DEL. Double quotes and
  // backslashes could break out of the multipart Content-Disposition
  // filename parameter (CWE-93), so they are replaced too.
  return fileName.replace(/[\x00-\x1F\x7F\r\n"\\]/g, "_");
}
/**
@ -249,15 +252,7 @@ export async function uploadFileFeishu(params: {
accountId?: string;
}): Promise<UploadFileResult> {
const { cfg, file, fileName, fileType, duration, accountId } = params;
const account = resolveFeishuAccount({ cfg, accountId });
if (!account.configured) {
throw new Error(`Feishu account "${account.accountId}" not configured`);
}
const client = createFeishuClient({
...account,
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
});
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
// SDK accepts Buffer directly or fs.ReadStream for file paths
// Using Readable.from(buffer) causes issues with form-data library
@ -276,19 +271,12 @@ export async function uploadFileFeishu(params: {
},
});
// SDK v1.30+ returns data directly without code wrapper on success
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type
const responseAny = response as any;
if (responseAny.code !== undefined && responseAny.code !== 0) {
throw new Error(`Feishu file upload failed: ${responseAny.msg || `code ${responseAny.code}`}`);
}
const fileKey = responseAny.file_key ?? responseAny.data?.file_key;
if (!fileKey) {
throw new Error("Feishu file upload failed: no file_key returned");
}
return { fileKey };
return {
fileKey: extractFeishuUploadKey(response, {
key: "file_key",
errorPrefix: "Feishu file upload failed",
}),
};
}
/**

View File

@ -78,6 +78,25 @@ async function resolveReactionWithLookup(params: {
});
}
/**
 * Drive resolveReactionSyntheticEvent with a canned group message from a
 * non-bot sender ("ou_other") so tests can exercise the reaction-filtering
 * path. Optional overrides: a custom config and a deterministic uuid
 * generator.
 */
async function resolveNonBotReaction(params?: { cfg?: ClawdbotConfig; uuid?: () => string }) {
  const uuidOverride = params?.uuid ? { uuid: params.uuid } : {};
  return await resolveReactionSyntheticEvent({
    cfg: params?.cfg ?? cfg,
    accountId: "default",
    event: makeReactionEvent(),
    botOpenId: "ou_bot",
    fetchMessage: async () => ({
      messageId: "om_msg1",
      chatId: "oc_group",
      chatType: "group",
      senderOpenId: "ou_other",
      senderType: "user",
      content: "hello",
      contentType: "text",
    }),
    ...uuidOverride,
  });
}
type FeishuMention = NonNullable<FeishuMessageEvent["message"]["mentions"]>[number];
function buildDebounceConfig(): ClawdbotConfig {
@ -179,6 +198,19 @@ function getFirstDispatchedEvent(): FeishuMessageEvent {
return firstParams.event;
}
// Assert exactly one Feishu message was dispatched, then return that event.
function expectSingleDispatchedEvent(): FeishuMessageEvent {
  expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
  return getFirstDispatchedEvent();
}
/**
 * Assert a single dispatch happened, then return both the raw dispatched
 * event and its parsed form for the given bot open id.
 */
function expectParsedFirstDispatchedEvent(botOpenId = "ou_bot") {
  const dispatchedEvent = expectSingleDispatchedEvent();
  const parsedEvent = parseFeishuMessageEvent(dispatchedEvent, botOpenId);
  return { dispatched: dispatchedEvent, parsed: parsedEvent };
}
function setDedupPassThroughMocks(): void {
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
@ -203,6 +235,13 @@ async function enqueueDebouncedMessage(
await Promise.resolve();
}
// Make dedup report `messageId` as already processed — both the in-memory
// check (key ends with ":<messageId>") and the persistent lookup — so a
// redelivery of that message is treated as a stale retry.
function setStaleRetryMocks(messageId = "om_old") {
  vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(`:${messageId}`));
  vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation(
    async (currentMessageId) => currentMessageId === messageId,
  );
}
describe("resolveReactionSyntheticEvent", () => {
it("filters app self-reactions", async () => {
const event = makeReactionEvent({ operator_type: "app" });
@ -262,28 +301,12 @@ describe("resolveReactionSyntheticEvent", () => {
});
it("filters reactions on non-bot messages", async () => {
const event = makeReactionEvent();
const result = await resolveReactionSyntheticEvent({
cfg,
accountId: "default",
event,
botOpenId: "ou_bot",
fetchMessage: async () => ({
messageId: "om_msg1",
chatId: "oc_group",
chatType: "group",
senderOpenId: "ou_other",
senderType: "user",
content: "hello",
contentType: "text",
}),
});
const result = await resolveNonBotReaction();
expect(result).toBeNull();
});
it("allows non-bot reactions when reactionNotifications is all", async () => {
const event = makeReactionEvent();
const result = await resolveReactionSyntheticEvent({
const result = await resolveNonBotReaction({
cfg: {
channels: {
feishu: {
@ -291,18 +314,6 @@ describe("resolveReactionSyntheticEvent", () => {
},
},
} as ClawdbotConfig,
accountId: "default",
event,
botOpenId: "ou_bot",
fetchMessage: async () => ({
messageId: "om_msg1",
chatId: "oc_group",
chatType: "group",
senderOpenId: "ou_other",
senderType: "user",
content: "hello",
contentType: "text",
}),
uuid: () => "fixed-uuid",
});
expect(result?.message.message_id).toBe("om_msg1:reaction:THUMBSUP:fixed-uuid");
@ -457,8 +468,7 @@ describe("Feishu inbound debounce regressions", () => {
);
await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
const dispatched = getFirstDispatchedEvent();
const dispatched = expectSingleDispatchedEvent();
const mergedMentions = dispatched.message.mentions ?? [];
expect(mergedMentions.some((mention) => mention.id.open_id === "ou_bot")).toBe(true);
expect(mergedMentions.some((mention) => mention.id.open_id === "ou_user_a")).toBe(false);
@ -517,9 +527,7 @@ describe("Feishu inbound debounce regressions", () => {
);
await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
const dispatched = getFirstDispatchedEvent();
const parsed = parseFeishuMessageEvent(dispatched, "ou_bot");
const { dispatched, parsed } = expectParsedFirstDispatchedEvent();
expect(parsed.mentionedBot).toBe(true);
expect(parsed.mentionTargets).toBeUndefined();
const mergedMentions = dispatched.message.mentions ?? [];
@ -547,19 +555,14 @@ describe("Feishu inbound debounce regressions", () => {
);
await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
const dispatched = getFirstDispatchedEvent();
const parsed = parseFeishuMessageEvent(dispatched, "ou_bot");
const { parsed } = expectParsedFirstDispatchedEvent();
expect(parsed.mentionedBot).toBe(true);
});
it("excludes previously processed retries from combined debounce text", async () => {
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old"));
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation(
async (messageId) => messageId === "om_old",
);
setStaleRetryMocks();
const onMessage = await setupDebounceMonitor();
await onMessage(createTextEvent({ messageId: "om_old", text: "stale" }));
@ -576,8 +579,7 @@ describe("Feishu inbound debounce regressions", () => {
await Promise.resolve();
await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
const dispatched = getFirstDispatchedEvent();
const dispatched = expectSingleDispatchedEvent();
expect(dispatched.message.message_id).toBe("om_new_2");
const combined = JSON.parse(dispatched.message.content) as { text?: string };
expect(combined.text).toBe("first\nsecond");
@ -586,10 +588,7 @@ describe("Feishu inbound debounce regressions", () => {
it("uses latest fresh message id when debounce batch ends with stale retry", async () => {
const recordSpy = vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old"));
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation(
async (messageId) => messageId === "om_old",
);
setStaleRetryMocks();
const onMessage = await setupDebounceMonitor();
await onMessage(createTextEvent({ messageId: "om_new", text: "fresh" }));
@ -600,8 +599,7 @@ describe("Feishu inbound debounce regressions", () => {
await Promise.resolve();
await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
const dispatched = getFirstDispatchedEvent();
const dispatched = expectSingleDispatchedEvent();
expect(dispatched.message.message_id).toBe("om_new");
const combined = JSON.parse(dispatched.message.content) as { text?: string };
expect(combined.text).toBe("fresh");

View File

@ -3,11 +3,16 @@ import { afterEach, describe, expect, it, vi } from "vitest";
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
const probeFeishuMock = vi.hoisted(() => vi.fn());
const feishuClientMockModule = vi.hoisted(() => ({
vi.mock("./probe.js", () => ({
probeFeishu: probeFeishuMock,
}));
vi.mock("./client.js", () => ({
createFeishuWSClient: vi.fn(() => ({ start: vi.fn() })),
createEventDispatcher: vi.fn(() => ({ register: vi.fn() })),
}));
const feishuRuntimeMockModule = vi.hoisted(() => ({
vi.mock("./runtime.js", () => ({
getFeishuRuntime: () => ({
channel: {
debounce: {
@ -24,13 +29,6 @@ const feishuRuntimeMockModule = vi.hoisted(() => ({
}),
}));
vi.mock("./probe.js", () => ({
probeFeishu: probeFeishuMock,
}));
vi.mock("./client.js", () => feishuClientMockModule);
vi.mock("./runtime.js", () => feishuRuntimeMockModule);
function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig {
return {
channels: {
@ -52,6 +50,12 @@ function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig
} as ClawdbotConfig;
}
/**
 * Flush up to 10 microtask ticks, stopping early once `accountId` appears
 * in the `started` list. Lets sequential monitor startup settle in tests
 * without real timers.
 */
async function waitForStartedAccount(started: string[], accountId: string) {
  let remainingTicks = 10;
  while (remainingTicks > 0 && !started.includes(accountId)) {
    await Promise.resolve();
    remainingTicks -= 1;
  }
}
afterEach(() => {
stopFeishuMonitor();
});
@ -116,10 +120,7 @@ describe("Feishu monitor startup preflight", () => {
});
try {
for (let i = 0; i < 10 && !started.includes("beta"); i += 1) {
await Promise.resolve();
}
await waitForStartedAccount(started, "beta");
expect(started).toEqual(["alpha", "beta"]);
expect(started.filter((accountId) => accountId === "alpha")).toHaveLength(1);
} finally {
@ -153,10 +154,7 @@ describe("Feishu monitor startup preflight", () => {
});
try {
for (let i = 0; i < 10 && !started.includes("beta"); i += 1) {
await Promise.resolve();
}
await waitForStartedAccount(started, "beta");
expect(started).toEqual(["alpha", "beta"]);
expect(runtime.error).toHaveBeenCalledWith(
expect.stringContaining("bot info probe timed out"),

View File

@ -1,9 +1,7 @@
import crypto from "node:crypto";
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { afterEach, describe, expect, it, vi } from "vitest";
import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js";
import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js";
const probeFeishuMock = vi.hoisted(() => vi.fn());
@ -23,61 +21,6 @@ vi.mock("./runtime.js", () => createFeishuRuntimeMockModule());
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
async function getFreePort(): Promise<number> {
const server = createServer();
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
const address = server.address() as AddressInfo | null;
if (!address) {
throw new Error("missing server address");
}
await new Promise<void>((resolve) => server.close(() => resolve()));
return address.port;
}
async function waitUntilServerReady(url: string): Promise<void> {
for (let i = 0; i < 50; i += 1) {
try {
const response = await fetch(url, { method: "GET" });
if (response.status >= 200 && response.status < 500) {
return;
}
} catch {
// retry
}
await new Promise((resolve) => setTimeout(resolve, 20));
}
throw new Error(`server did not start: ${url}`);
}
function buildConfig(params: {
accountId: string;
path: string;
port: number;
verificationToken?: string;
encryptKey?: string;
}): ClawdbotConfig {
return {
channels: {
feishu: {
enabled: true,
accounts: {
[params.accountId]: {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "webhook",
webhookHost: "127.0.0.1",
webhookPort: params.port,
webhookPath: params.path,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
},
},
},
},
} as ClawdbotConfig;
}
function signFeishuPayload(params: {
encryptKey: string;
payload: Record<string, unknown>;
@ -107,41 +50,12 @@ function encryptFeishuPayload(encryptKey: string, payload: Record<string, unknow
return Buffer.concat([iv, encrypted]).toString("base64");
}
async function withRunningWebhookMonitor(
params: {
accountId: string;
path: string;
verificationToken: string;
encryptKey: string;
},
run: (url: string) => Promise<void>,
) {
const port = await getFreePort();
const cfg = buildConfig({
accountId: params.accountId,
path: params.path,
port,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
async function postSignedPayload(url: string, payload: Record<string, unknown>) {
return await fetch(url, {
method: "POST",
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
body: JSON.stringify(payload),
});
const abortController = new AbortController();
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
const monitorPromise = monitorFeishuProvider({
config: cfg,
runtime,
abortSignal: abortController.signal,
});
const url = `http://127.0.0.1:${port}${params.path}`;
await waitUntilServerReady(url);
try {
await run(url);
} finally {
abortController.abort();
await monitorPromise;
}
}
afterEach(() => {
@ -159,6 +73,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = { type: "url_verification", challenge: "challenge-token" };
const response = await fetch(url, {
@ -185,6 +100,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const response = await fetch(url, {
method: "POST",
@ -208,6 +124,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const response = await fetch(url, {
method: "POST",
@ -231,13 +148,10 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = { type: "url_verification", challenge: "challenge-token" };
const response = await fetch(url, {
method: "POST",
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
body: JSON.stringify(payload),
});
const response = await postSignedPayload(url, payload);
expect(response.status).toBe(200);
await expect(response.json()).resolves.toEqual({ challenge: "challenge-token" });
@ -255,17 +169,14 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = {
schema: "2.0",
header: { event_type: "unknown.event" },
event: {},
};
const response = await fetch(url, {
method: "POST",
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
body: JSON.stringify(payload),
});
const response = await postSignedPayload(url, payload);
expect(response.status).toBe(200);
expect(await response.text()).toContain("no unknown.event event handle");
@ -283,6 +194,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const payload = {
encrypt: encryptFeishuPayload("encrypt_key", {
@ -290,11 +202,7 @@ describe("Feishu webhook signed-request e2e", () => {
challenge: "encrypted-challenge-token",
}),
};
const response = await fetch(url, {
method: "POST",
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
body: JSON.stringify(payload),
});
const response = await postSignedPayload(url, payload);
expect(response.status).toBe(200);
await expect(response.json()).resolves.toEqual({

View File

@ -1,11 +1,13 @@
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { afterEach, describe, expect, it, vi } from "vitest";
import {
createFeishuClientMockModule,
createFeishuRuntimeMockModule,
} from "./monitor.test-mocks.js";
import {
buildWebhookConfig,
getFreePort,
withRunningWebhookMonitor,
} from "./monitor.webhook.test-helpers.js";
const probeFeishuMock = vi.hoisted(() => vi.fn());
@ -33,98 +35,6 @@ import {
stopFeishuMonitor,
} from "./monitor.js";
async function getFreePort(): Promise<number> {
const server = createServer();
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
const address = server.address() as AddressInfo | null;
if (!address) {
throw new Error("missing server address");
}
await new Promise<void>((resolve) => server.close(() => resolve()));
return address.port;
}
async function waitUntilServerReady(url: string): Promise<void> {
for (let i = 0; i < 50; i += 1) {
try {
const response = await fetch(url, { method: "GET" });
if (response.status >= 200 && response.status < 500) {
return;
}
} catch {
// retry
}
await new Promise((resolve) => setTimeout(resolve, 20));
}
throw new Error(`server did not start: ${url}`);
}
function buildConfig(params: {
accountId: string;
path: string;
port: number;
verificationToken?: string;
encryptKey?: string;
}): ClawdbotConfig {
return {
channels: {
feishu: {
enabled: true,
accounts: {
[params.accountId]: {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "webhook",
webhookHost: "127.0.0.1",
webhookPort: params.port,
webhookPath: params.path,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
},
},
},
},
} as ClawdbotConfig;
}
async function withRunningWebhookMonitor(
params: {
accountId: string;
path: string;
verificationToken: string;
encryptKey: string;
},
run: (url: string) => Promise<void>,
) {
const port = await getFreePort();
const cfg = buildConfig({
accountId: params.accountId,
path: params.path,
port,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
});
const abortController = new AbortController();
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
const monitorPromise = monitorFeishuProvider({
config: cfg,
runtime,
abortSignal: abortController.signal,
});
const url = `http://127.0.0.1:${port}${params.path}`;
await waitUntilServerReady(url);
try {
await run(url);
} finally {
abortController.abort();
await monitorPromise;
}
}
afterEach(() => {
clearFeishuWebhookRateLimitStateForTest();
stopFeishuMonitor();
@ -134,7 +44,7 @@ describe("Feishu webhook security hardening", () => {
it("rejects webhook mode without verificationToken", async () => {
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
const cfg = buildConfig({
const cfg = buildWebhookConfig({
accountId: "missing-token",
path: "/hook-missing-token",
port: await getFreePort(),
@ -148,7 +58,7 @@ describe("Feishu webhook security hardening", () => {
it("rejects webhook mode without encryptKey", async () => {
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
const cfg = buildConfig({
const cfg = buildWebhookConfig({
accountId: "missing-encrypt-key",
path: "/hook-missing-encrypt",
port: await getFreePort(),
@ -167,6 +77,7 @@ describe("Feishu webhook security hardening", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
const response = await fetch(url, {
method: "POST",
@ -189,6 +100,7 @@ describe("Feishu webhook security hardening", () => {
verificationToken: "verify_token",
encryptKey: "encrypt_key",
},
monitorFeishuProvider,
async (url) => {
let saw429 = false;
for (let i = 0; i < 130; i += 1) {

View File

@ -0,0 +1,98 @@
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { vi } from "vitest";
import type { monitorFeishuProvider } from "./monitor.js";
/**
 * Bind an ephemeral TCP port on loopback, record its number, close the
 * probe listener, and return the port for the test webhook server to use.
 */
export async function getFreePort(): Promise<number> {
  const probe = createServer();
  await new Promise<void>((resolve) => probe.listen(0, "127.0.0.1", () => resolve()));
  const boundAddress = probe.address() as AddressInfo | null;
  if (boundAddress === null) {
    throw new Error("missing server address");
  }
  const { port } = boundAddress;
  await new Promise<void>((resolve) => probe.close(() => resolve()));
  return port;
}
/**
 * Poll `url` with GET until any non-5xx response arrives — up to 50
 * attempts, 20 ms apart. Throws if the server never becomes reachable.
 */
async function waitUntilServerReady(url: string): Promise<void> {
  const maxAttempts = 50;
  for (let attempt = 0; attempt < maxAttempts; attempt += 1) {
    try {
      const probe = await fetch(url, { method: "GET" });
      if (probe.status >= 200 && probe.status < 500) {
        return;
      }
    } catch {
      // connection refused — server not listening yet; retry below
    }
    await new Promise((resolve) => setTimeout(resolve, 20));
  }
  throw new Error(`server did not start: ${url}`);
}
/**
 * Build a minimal ClawdbotConfig with one Feishu account in webhook mode,
 * listening on 127.0.0.1 at the given path/port. verificationToken and
 * encryptKey are optional so tests can exercise missing-credential
 * validation.
 */
export function buildWebhookConfig(params: {
  accountId: string;
  path: string;
  port: number;
  verificationToken?: string;
  encryptKey?: string;
}): ClawdbotConfig {
  const webhookAccount = {
    enabled: true,
    appId: "cli_test",
    appSecret: "secret_test", // pragma: allowlist secret
    connectionMode: "webhook",
    webhookHost: "127.0.0.1",
    webhookPort: params.port,
    webhookPath: params.path,
    encryptKey: params.encryptKey,
    verificationToken: params.verificationToken,
  };
  return {
    channels: {
      feishu: {
        enabled: true,
        accounts: { [params.accountId]: webhookAccount },
      },
    },
  } as ClawdbotConfig;
}
/**
 * Start the Feishu webhook monitor against a freshly built single-account
 * config, wait until its HTTP endpoint answers, invoke `run` with the webhook
 * URL, then abort the monitor and await its shutdown — even if `run` throws.
 */
export async function withRunningWebhookMonitor(
  params: {
    accountId: string;
    path: string;
    verificationToken: string;
    encryptKey: string;
  },
  monitor: typeof monitorFeishuProvider,
  run: (url: string) => Promise<void>,
) {
  const port = await getFreePort();
  const config = buildWebhookConfig({
    accountId: params.accountId,
    path: params.path,
    port,
    encryptKey: params.encryptKey,
    verificationToken: params.verificationToken,
  });
  const abort = new AbortController();
  const stubRuntime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
  const running = monitor({
    config,
    runtime: stubRuntime,
    abortSignal: abort.signal,
  });
  const url = `http://127.0.0.1:${port}${params.path}`;
  await waitUntilServerReady(url);
  try {
    await run(url);
  } finally {
    abort.abort();
    await running;
  }
}

View File

@ -29,12 +29,16 @@ vi.mock("./runtime.js", () => ({
import { feishuOutbound } from "./outbound.js";
const sendText = feishuOutbound.sendText!;
/** Clear every registered mock, then restore the default resolved message ids. */
function resetOutboundMocks() {
  vi.clearAllMocks();
  sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
  sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
  sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
}
describe("feishuOutbound.sendText local-image auto-convert", () => {
beforeEach(() => {
vi.clearAllMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
resetOutboundMocks();
});
async function createTmpImage(ext = ".png"): Promise<{ dir: string; file: string }> {
@ -181,10 +185,7 @@ describe("feishuOutbound.sendText local-image auto-convert", () => {
describe("feishuOutbound.sendText replyToId forwarding", () => {
beforeEach(() => {
vi.clearAllMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
resetOutboundMocks();
});
it("forwards replyToId as replyToMessageId to sendMessageFeishu", async () => {
@ -249,10 +250,7 @@ describe("feishuOutbound.sendText replyToId forwarding", () => {
describe("feishuOutbound.sendMedia replyToId forwarding", () => {
beforeEach(() => {
vi.clearAllMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
resetOutboundMocks();
});
it("forwards replyToId to sendMediaFeishu", async () => {
@ -292,10 +290,7 @@ describe("feishuOutbound.sendMedia replyToId forwarding", () => {
describe("feishuOutbound.sendMedia renderMode", () => {
beforeEach(() => {
vi.clearAllMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
resetOutboundMocks();
});
it("uses markdown cards for captions when renderMode=card", async () => {

View File

@ -8,6 +8,22 @@ vi.mock("./client.js", () => ({
import { FEISHU_PROBE_REQUEST_TIMEOUT_MS, probeFeishu, clearProbeCache } from "./probe.js";
const DEFAULT_CREDS = { appId: "cli_123", appSecret: "secret" } as const; // pragma: allowlist secret
const DEFAULT_SUCCESS_RESPONSE = {
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
} as const;
const DEFAULT_SUCCESS_RESULT = {
ok: true,
appId: "cli_123",
botName: "TestBot",
botOpenId: "ou_abc123",
} as const;
const BOT1_RESPONSE = {
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
} as const;
/** Create a vi mock whose calls all resolve with the given Feishu API payload. */
function makeRequestFn(response: Record<string, unknown>) {
  return vi.fn().mockResolvedValue(response);
}
@ -18,6 +34,64 @@ function setupClient(response: Record<string, unknown>) {
return requestFn;
}
/** Wire the mocked Feishu client to return the canonical successful bot-info response. */
function setupSuccessClient() {
  return setupClient(DEFAULT_SUCCESS_RESPONSE);
}
/** Probe with `creds` (default creds by default) and assert the result equals `expected`. */
async function expectDefaultSuccessResult(
  creds = DEFAULT_CREDS,
  expected: Awaited<ReturnType<typeof probeFeishu>> = DEFAULT_SUCCESS_RESULT,
) {
  expect(await probeFeishu(creds)).toEqual(expected);
}
/** Run `run` under vitest fake timers, restoring real timers even if it throws. */
async function withFakeTimers(run: () => Promise<void>) {
  vi.useFakeTimers();
  try {
    await run();
  } finally {
    vi.useRealTimers();
  }
}
/**
 * Assert that a failed probe result is served from cache until `ttlMs` elapses:
 * two immediate probes share a single API request, and a probe issued after the
 * TTL triggers a second one. Assumes fake timers are active — confirm at call sites.
 */
async function expectErrorResultCached(params: {
  requestFn: ReturnType<typeof vi.fn>;
  expectedError: string;
  ttlMs: number;
}) {
  const { requestFn, expectedError, ttlMs } = params;
  createFeishuClientMock.mockReturnValue({ request: requestFn });
  const initial = await probeFeishu(DEFAULT_CREDS);
  const repeated = await probeFeishu(DEFAULT_CREDS);
  expect(initial).toMatchObject({ ok: false, error: expectedError });
  expect(repeated).toMatchObject({ ok: false, error: expectedError });
  expect(requestFn).toHaveBeenCalledTimes(1);
  vi.advanceTimersByTime(ttlMs + 1);
  await probeFeishu(DEFAULT_CREDS);
  expect(requestFn).toHaveBeenCalledTimes(2);
}
/** Assert that `invalidate()` causes the next default-credential probe to hit the API again. */
async function expectFreshDefaultProbeAfter(
  requestFn: ReturnType<typeof vi.fn>,
  invalidate: () => void,
) {
  let expectedCalls = 0;
  const probeAndCount = async () => {
    await probeFeishu(DEFAULT_CREDS);
    expectedCalls += 1;
    expect(requestFn).toHaveBeenCalledTimes(expectedCalls);
  };
  await probeAndCount();
  invalidate();
  await probeAndCount();
}
/** Issue two back-to-back default-credential probes and return both results. */
async function readSequentialDefaultProbePair() {
  const first = await probeFeishu(DEFAULT_CREDS);
  const second = await probeFeishu(DEFAULT_CREDS);
  return { first, second };
}
describe("probeFeishu", () => {
beforeEach(() => {
clearProbeCache();
@ -44,28 +118,16 @@ describe("probeFeishu", () => {
});
it("returns bot info on successful probe", async () => {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
});
const requestFn = setupSuccessClient();
const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
expect(result).toEqual({
ok: true,
appId: "cli_123",
botName: "TestBot",
botOpenId: "ou_abc123",
});
await expectDefaultSuccessResult();
expect(requestFn).toHaveBeenCalledTimes(1);
});
it("passes the probe timeout to the Feishu request", async () => {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
});
const requestFn = setupSuccessClient();
await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
await probeFeishu(DEFAULT_CREDS);
expect(requestFn).toHaveBeenCalledWith(
expect.objectContaining({
@ -77,19 +139,16 @@ describe("probeFeishu", () => {
});
it("returns timeout error when request exceeds timeout", async () => {
vi.useFakeTimers();
try {
await withFakeTimers(async () => {
const requestFn = vi.fn().mockImplementation(() => new Promise(() => {}));
createFeishuClientMock.mockReturnValue({ request: requestFn });
const promise = probeFeishu({ appId: "cli_123", appSecret: "secret" }, { timeoutMs: 1_000 });
const promise = probeFeishu(DEFAULT_CREDS, { timeoutMs: 1_000 });
await vi.advanceTimersByTimeAsync(1_000);
const result = await promise;
expect(result).toMatchObject({ ok: false, error: "probe timed out after 1000ms" });
} finally {
vi.useRealTimers();
}
});
});
it("returns aborted when abort signal is already aborted", async () => {
@ -106,14 +165,9 @@ describe("probeFeishu", () => {
expect(createFeishuClientMock).not.toHaveBeenCalled();
});
it("returns cached result on subsequent calls within TTL", async () => {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
});
const requestFn = setupSuccessClient();
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
const first = await probeFeishu(creds);
const second = await probeFeishu(creds);
const { first, second } = await readSequentialDefaultProbePair();
expect(first).toEqual(second);
// Only one API call should have been made
@ -121,76 +175,37 @@ describe("probeFeishu", () => {
});
it("makes a fresh API call after cache expires", async () => {
vi.useFakeTimers();
try {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
await withFakeTimers(async () => {
const requestFn = setupSuccessClient();
await expectFreshDefaultProbeAfter(requestFn, () => {
vi.advanceTimersByTime(10 * 60 * 1000 + 1);
});
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(1);
// Advance time past the success TTL
vi.advanceTimersByTime(10 * 60 * 1000 + 1);
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
} finally {
vi.useRealTimers();
}
});
});
it("caches failed probe results (API error) for the error TTL", async () => {
vi.useFakeTimers();
try {
const requestFn = makeRequestFn({ code: 99, msg: "token expired" });
createFeishuClientMock.mockReturnValue({ request: requestFn });
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
const first = await probeFeishu(creds);
const second = await probeFeishu(creds);
expect(first).toMatchObject({ ok: false, error: "API error: token expired" });
expect(second).toMatchObject({ ok: false, error: "API error: token expired" });
expect(requestFn).toHaveBeenCalledTimes(1);
vi.advanceTimersByTime(60 * 1000 + 1);
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
} finally {
vi.useRealTimers();
}
await withFakeTimers(async () => {
await expectErrorResultCached({
requestFn: makeRequestFn({ code: 99, msg: "token expired" }),
expectedError: "API error: token expired",
ttlMs: 60 * 1000,
});
});
});
it("caches thrown request errors for the error TTL", async () => {
vi.useFakeTimers();
try {
const requestFn = vi.fn().mockRejectedValue(new Error("network error"));
createFeishuClientMock.mockReturnValue({ request: requestFn });
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
const first = await probeFeishu(creds);
const second = await probeFeishu(creds);
expect(first).toMatchObject({ ok: false, error: "network error" });
expect(second).toMatchObject({ ok: false, error: "network error" });
expect(requestFn).toHaveBeenCalledTimes(1);
vi.advanceTimersByTime(60 * 1000 + 1);
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
} finally {
vi.useRealTimers();
}
await withFakeTimers(async () => {
await expectErrorResultCached({
requestFn: vi.fn().mockRejectedValue(new Error("network error")),
expectedError: "network error",
ttlMs: 60 * 1000,
});
});
});
it("caches per account independently", async () => {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
});
const requestFn = setupClient(BOT1_RESPONSE);
await probeFeishu({ appId: "cli_aaa", appSecret: "s1" }); // pragma: allowlist secret
expect(requestFn).toHaveBeenCalledTimes(1);
@ -205,10 +220,7 @@ describe("probeFeishu", () => {
});
it("does not share cache between accounts with same appId but different appSecret", async () => {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
});
const requestFn = setupClient(BOT1_RESPONSE);
// First account with appId + secret A
await probeFeishu({ appId: "cli_shared", appSecret: "secret_aaa" }); // pragma: allowlist secret
@ -221,10 +233,7 @@ describe("probeFeishu", () => {
});
it("uses accountId for cache key when available", async () => {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
});
const requestFn = setupClient(BOT1_RESPONSE);
// Two accounts with same appId+appSecret but different accountIds are cached separately
await probeFeishu({ accountId: "acct-1", appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
@ -239,19 +248,11 @@ describe("probeFeishu", () => {
});
it("clearProbeCache forces fresh API call", async () => {
const requestFn = setupClient({
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
const requestFn = setupSuccessClient();
await expectFreshDefaultProbeAfter(requestFn, () => {
clearProbeCache();
});
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(1);
clearProbeCache();
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
});
it("handles response.data.bot fallback path", async () => {
@ -260,10 +261,8 @@ describe("probeFeishu", () => {
data: { bot: { bot_name: "DataBot", open_id: "ou_data" } },
});
const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
expect(result).toEqual({
ok: true,
appId: "cli_123",
await expectDefaultSuccessResult(DEFAULT_CREDS, {
...DEFAULT_SUCCESS_RESULT,
botName: "DataBot",
botOpenId: "ou_data",
});

View File

@ -9,6 +9,20 @@ export type FeishuReaction = {
operatorId: string;
};
/**
 * Resolve the requested Feishu account and return an API client for it.
 * Throws when the resolved account is not marked as configured.
 */
function resolveConfiguredFeishuClient(params: { cfg: ClawdbotConfig; accountId?: string }) {
  const resolved = resolveFeishuAccount(params);
  if (resolved.configured) {
    return createFeishuClient(resolved);
  }
  throw new Error(`Feishu account "${resolved.accountId}" not configured`);
}
/**
 * Throw when a Feishu reaction API response reports a non-zero (or missing) code.
 * The error message names the attempted `action` and carries the server `msg`,
 * falling back to the raw code when no message is present.
 */
function assertFeishuReactionApiSuccess(response: { code?: number; msg?: string }, action: string) {
  if (response.code === 0) {
    return;
  }
  const detail = response.msg || `code ${response.code}`;
  throw new Error(`Feishu ${action} failed: ${detail}`);
}
/**
* Add a reaction (emoji) to a message.
* @param emojiType - Feishu emoji type, e.g., "SMILE", "THUMBSUP", "HEART"
@ -21,12 +35,7 @@ export async function addReactionFeishu(params: {
accountId?: string;
}): Promise<{ reactionId: string }> {
const { cfg, messageId, emojiType, accountId } = params;
const account = resolveFeishuAccount({ cfg, accountId });
if (!account.configured) {
throw new Error(`Feishu account "${account.accountId}" not configured`);
}
const client = createFeishuClient(account);
const client = resolveConfiguredFeishuClient({ cfg, accountId });
const response = (await client.im.messageReaction.create({
path: { message_id: messageId },
@ -41,9 +50,7 @@ export async function addReactionFeishu(params: {
data?: { reaction_id?: string };
};
if (response.code !== 0) {
throw new Error(`Feishu add reaction failed: ${response.msg || `code ${response.code}`}`);
}
assertFeishuReactionApiSuccess(response, "add reaction");
const reactionId = response.data?.reaction_id;
if (!reactionId) {
@ -63,12 +70,7 @@ export async function removeReactionFeishu(params: {
accountId?: string;
}): Promise<void> {
const { cfg, messageId, reactionId, accountId } = params;
const account = resolveFeishuAccount({ cfg, accountId });
if (!account.configured) {
throw new Error(`Feishu account "${account.accountId}" not configured`);
}
const client = createFeishuClient(account);
const client = resolveConfiguredFeishuClient({ cfg, accountId });
const response = (await client.im.messageReaction.delete({
path: {
@ -77,9 +79,7 @@ export async function removeReactionFeishu(params: {
},
})) as { code?: number; msg?: string };
if (response.code !== 0) {
throw new Error(`Feishu remove reaction failed: ${response.msg || `code ${response.code}`}`);
}
assertFeishuReactionApiSuccess(response, "remove reaction");
}
/**
@ -92,12 +92,7 @@ export async function listReactionsFeishu(params: {
accountId?: string;
}): Promise<FeishuReaction[]> {
const { cfg, messageId, emojiType, accountId } = params;
const account = resolveFeishuAccount({ cfg, accountId });
if (!account.configured) {
throw new Error(`Feishu account "${account.accountId}" not configured`);
}
const client = createFeishuClient(account);
const client = resolveConfiguredFeishuClient({ cfg, accountId });
const response = (await client.im.messageReaction.list({
path: { message_id: messageId },
@ -115,9 +110,7 @@ export async function listReactionsFeishu(params: {
};
};
if (response.code !== 0) {
throw new Error(`Feishu list reactions failed: ${response.msg || `code ${response.code}`}`);
}
assertFeishuReactionApiSuccess(response, "list reactions");
const items = response.data?.items ?? [];
return items.map((item) => ({

View File

@ -25,44 +25,33 @@ vi.mock("./typing.js", () => ({
addTypingIndicator: addTypingIndicatorMock,
removeTypingIndicator: removeTypingIndicatorMock,
}));
vi.mock("./streaming-card.js", () => ({
mergeStreamingText: (previousText: string | undefined, nextText: string | undefined) => {
const previous = typeof previousText === "string" ? previousText : "";
const next = typeof nextText === "string" ? nextText : "";
if (!next) {
return previous;
}
if (!previous || next === previous) {
return next;
}
if (next.startsWith(previous)) {
return next;
}
if (previous.startsWith(next)) {
return previous;
}
return `${previous}${next}`;
},
FeishuStreamingSession: class {
active = false;
start = vi.fn(async () => {
this.active = true;
});
update = vi.fn(async () => {});
close = vi.fn(async () => {
this.active = false;
});
isActive = vi.fn(() => this.active);
vi.mock("./streaming-card.js", async () => {
const actual = await vi.importActual<typeof import("./streaming-card.js")>("./streaming-card.js");
return {
mergeStreamingText: actual.mergeStreamingText,
FeishuStreamingSession: class {
active = false;
start = vi.fn(async () => {
this.active = true;
});
update = vi.fn(async () => {});
close = vi.fn(async () => {
this.active = false;
});
isActive = vi.fn(() => this.active);
constructor() {
streamingInstances.push(this);
}
},
}));
constructor() {
streamingInstances.push(this);
}
},
};
});
import { createFeishuReplyDispatcher } from "./reply-dispatcher.js";
describe("createFeishuReplyDispatcher streaming behavior", () => {
type ReplyDispatcherArgs = Parameters<typeof createFeishuReplyDispatcher>[0];
beforeEach(() => {
vi.clearAllMocks();
streamingInstances.length = 0;
@ -128,6 +117,25 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
return createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
}
// Minimal runtime stub exposing mock log/error fns; cast through `never` so it
// satisfies the dispatcher's runtime parameter without building a real runtime.
function createRuntimeLogger() {
  return { log: vi.fn(), error: vi.fn() } as never;
}
/**
 * Create a Feishu reply dispatcher from stub defaults merged with `overrides`,
 * returning the dispatcher result together with the options object captured by
 * the most recent createReplyDispatcherWithTyping invocation.
 */
function createDispatcherHarness(overrides: Partial<ReplyDispatcherArgs> = {}) {
  const baseArgs = {
    cfg: {} as never,
    agentId: "agent",
    runtime: {} as never,
    chatId: "oc_chat",
  };
  const result = createFeishuReplyDispatcher({ ...baseArgs, ...overrides });
  const latestCall = createReplyDispatcherWithTypingMock.mock.calls.at(-1);
  return { result, options: latestCall?.[0] };
}
it("skips typing indicator when account typingIndicator is disabled", async () => {
resolveFeishuAccountMock.mockReturnValue({
accountId: "main",
@ -209,14 +217,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("keeps auto mode plain text on non-streaming send path", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
const { options } = createDispatcherHarness();
await options.deliver({ text: "plain text" }, { kind: "final" });
expect(streamingInstances).toHaveLength(0);
@ -225,14 +226,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("suppresses internal block payload delivery", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
const { options } = createDispatcherHarness();
await options.deliver({ text: "internal reasoning chunk" }, { kind: "block" });
expect(streamingInstances).toHaveLength(0);
@ -253,15 +247,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("uses streaming session for auto mode markdown payloads", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
rootId: "om_root_topic",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
expect(streamingInstances).toHaveLength(1);
@ -277,14 +266,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("closes streaming with block text when final reply is missing", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```md\npartial answer\n```" }, { kind: "block" });
await options.onIdle?.();
@ -295,14 +279,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("delivers distinct final payloads after streaming close", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```md\n完整回复第一段\n```" }, { kind: "final" });
await options.deliver({ text: "```md\n完整回复第一段 + 第二段\n```" }, { kind: "final" });
@ -316,14 +295,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("skips exact duplicate final text after streaming close", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" });
await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" });
@ -383,14 +357,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
},
});
const result = createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { result, options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.onReplyStart?.();
await result.replyOptions.onPartialReply?.({ text: "hello" });
await options.deliver({ text: "lo world" }, { kind: "block" });
@ -402,14 +371,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("sends media-only payloads as attachments", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
const { options } = createDispatcherHarness();
await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" });
expect(sendMediaFeishuMock).toHaveBeenCalledTimes(1);
@ -424,14 +386,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("falls back to legacy mediaUrl when mediaUrls is an empty array", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
const { options } = createDispatcherHarness();
await options.deliver(
{ text: "caption", mediaUrl: "https://example.com/a.png", mediaUrls: [] },
{ kind: "final" },
@ -447,14 +402,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("sends attachments after streaming final markdown replies", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver(
{ text: "```ts\nconst x = 1\n```", mediaUrls: ["https://example.com/a.png"] },
{ kind: "final" },
@ -472,16 +422,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("passes replyInThread to sendMessageFeishu for plain text", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
replyToMessageId: "om_msg",
replyInThread: true,
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "plain text" }, { kind: "final" });
expect(sendMessageFeishuMock).toHaveBeenCalledWith(
@ -504,16 +448,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
},
});
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
replyToMessageId: "om_msg",
replyInThread: true,
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "card text" }, { kind: "final" });
expect(sendMarkdownCardFeishuMock).toHaveBeenCalledWith(
@ -525,16 +463,11 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("passes replyToMessageId and replyInThread to streaming.start()", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
replyToMessageId: "om_msg",
replyInThread: true,
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
expect(streamingInstances).toHaveLength(1);
@ -545,18 +478,13 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("disables streaming for thread replies and keeps reply metadata", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
runtime: createRuntimeLogger(),
replyToMessageId: "om_msg",
replyInThread: false,
threadReply: true,
rootId: "om_root_topic",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
expect(streamingInstances).toHaveLength(0);
@ -569,16 +497,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
});
it("passes replyInThread to media attachments", async () => {
createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
const { options } = createDispatcherHarness({
replyToMessageId: "om_msg",
replyInThread: true,
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" });
expect(sendMediaFeishuMock).toHaveBeenCalledWith(

View File

@ -224,6 +224,41 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
lastPartial = "";
};
// Send `text` to the chat in channel-sized chunks, either as markdown cards or
// plain messages. Mentions are attached only to the first chunk; final-reply
// texts are recorded in deliveredFinalTexts (presumably for duplicate-final
// suppression — confirm at the set's read sites).
const sendChunkedTextReply = async (params: {
  text: string;
  useCard: boolean;
  infoKind?: string;
}) => {
  let first = true;
  // Card sends render markdown natively; plain sends need tables converted first.
  const chunkSource = params.useCard
    ? params.text
    : core.channel.text.convertMarkdownTables(params.text, tableMode);
  for (const chunk of core.channel.text.chunkTextWithMode(
    chunkSource,
    textChunkLimit,
    chunkMode,
  )) {
    const message = {
      cfg,
      to: chatId,
      text: chunk,
      replyToMessageId: sendReplyToMessageId,
      replyInThread: effectiveReplyInThread,
      // Only the first chunk carries @-mentions, so recipients are pinged once.
      mentions: first ? mentionTargets : undefined,
      accountId,
    };
    if (params.useCard) {
      await sendMarkdownCardFeishu(message);
    } else {
      await sendMessageFeishu(message);
    }
    first = false;
  }
  if (params.infoKind === "final") {
    deliveredFinalTexts.add(params.text);
  }
};
const { dispatcher, replyOptions, markDispatchIdle } =
core.channel.reply.createReplyDispatcherWithTyping({
responsePrefix: prefixContext.responsePrefix,
@ -303,48 +338,10 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
return;
}
let first = true;
if (useCard) {
for (const chunk of core.channel.text.chunkTextWithMode(
text,
textChunkLimit,
chunkMode,
)) {
await sendMarkdownCardFeishu({
cfg,
to: chatId,
text: chunk,
replyToMessageId: sendReplyToMessageId,
replyInThread: effectiveReplyInThread,
mentions: first ? mentionTargets : undefined,
accountId,
});
first = false;
}
if (info?.kind === "final") {
deliveredFinalTexts.add(text);
}
await sendChunkedTextReply({ text, useCard: true, infoKind: info?.kind });
} else {
const converted = core.channel.text.convertMarkdownTables(text, tableMode);
for (const chunk of core.channel.text.chunkTextWithMode(
converted,
textChunkLimit,
chunkMode,
)) {
await sendMessageFeishu({
cfg,
to: chatId,
text: chunk,
replyToMessageId: sendReplyToMessageId,
replyInThread: effectiveReplyInThread,
mentions: first ? mentionTargets : undefined,
accountId,
});
first = false;
}
if (info?.kind === "final") {
deliveredFinalTexts.add(text);
}
await sendChunkedTextReply({ text, useCard: false, infoKind: info?.kind });
}
}

View File

@ -25,6 +25,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
const replyMock = vi.fn();
const createMock = vi.fn();
/**
 * Run `send` and assert the reply→create fallback fired exactly once on each
 * mock, yielding the expected fallback message id.
 */
async function expectFallbackResult(
  send: () => Promise<{ messageId?: string }>,
  expectedMessageId: string,
) {
  const outcome = await send();
  expect(replyMock).toHaveBeenCalledTimes(1);
  expect(createMock).toHaveBeenCalledTimes(1);
  expect(outcome.messageId).toBe(expectedMessageId);
}
beforeEach(() => {
vi.clearAllMocks();
resolveFeishuSendTargetMock.mockReturnValue({
@ -51,16 +61,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_new" },
});
const result = await sendMessageFeishu({
cfg: {} as never,
to: "user:ou_target",
text: "hello",
replyToMessageId: "om_parent",
});
expect(replyMock).toHaveBeenCalledTimes(1);
expect(createMock).toHaveBeenCalledTimes(1);
expect(result.messageId).toBe("om_new");
await expectFallbackResult(
() =>
sendMessageFeishu({
cfg: {} as never,
to: "user:ou_target",
text: "hello",
replyToMessageId: "om_parent",
}),
"om_new",
);
});
it("falls back to create for withdrawn card replies", async () => {
@ -73,16 +83,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_card_new" },
});
const result = await sendCardFeishu({
cfg: {} as never,
to: "user:ou_target",
card: { schema: "2.0" },
replyToMessageId: "om_parent",
});
expect(replyMock).toHaveBeenCalledTimes(1);
expect(createMock).toHaveBeenCalledTimes(1);
expect(result.messageId).toBe("om_card_new");
await expectFallbackResult(
() =>
sendCardFeishu({
cfg: {} as never,
to: "user:ou_target",
card: { schema: "2.0" },
replyToMessageId: "om_parent",
}),
"om_card_new",
);
});
it("still throws for non-withdrawn reply failures", async () => {
@ -111,16 +121,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_thrown_fallback" },
});
const result = await sendMessageFeishu({
cfg: {} as never,
to: "user:ou_target",
text: "hello",
replyToMessageId: "om_parent",
});
expect(replyMock).toHaveBeenCalledTimes(1);
expect(createMock).toHaveBeenCalledTimes(1);
expect(result.messageId).toBe("om_thrown_fallback");
await expectFallbackResult(
() =>
sendMessageFeishu({
cfg: {} as never,
to: "user:ou_target",
text: "hello",
replyToMessageId: "om_parent",
}),
"om_thrown_fallback",
);
});
it("falls back to create when card reply throws a not-found AxiosError", async () => {
@ -133,16 +143,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_axios_fallback" },
});
const result = await sendCardFeishu({
cfg: {} as never,
to: "user:ou_target",
card: { schema: "2.0" },
replyToMessageId: "om_parent",
});
expect(replyMock).toHaveBeenCalledTimes(1);
expect(createMock).toHaveBeenCalledTimes(1);
expect(result.messageId).toBe("om_axios_fallback");
await expectFallbackResult(
() =>
sendCardFeishu({
cfg: {} as never,
to: "user:ou_target",
card: { schema: "2.0" },
replyToMessageId: "om_parent",
}),
"om_axios_fallback",
);
});
it("re-throws non-withdrawn thrown errors for text messages", async () => {

View File

@ -43,6 +43,10 @@ function isWithdrawnReplyError(err: unknown): boolean {
type FeishuCreateMessageClient = {
im: {
message: {
reply: (opts: {
path: { message_id: string };
data: { content: string; msg_type: string; reply_in_thread?: true };
}) => Promise<{ code?: number; msg?: string; data?: { message_id?: string } }>;
create: (opts: {
params: { receive_id_type: "chat_id" | "email" | "open_id" | "union_id" | "user_id" };
data: { receive_id: string; content: string; msg_type: string };
@ -51,6 +55,30 @@ type FeishuCreateMessageClient = {
};
};
type FeishuMessageSender = {
id?: string;
id_type?: string;
sender_type?: string;
};
type FeishuMessageGetItem = {
message_id?: string;
chat_id?: string;
chat_type?: FeishuChatType;
msg_type?: string;
body?: { content?: string };
sender?: FeishuMessageSender;
create_time?: string;
};
type FeishuGetMessageResponse = {
code?: number;
msg?: string;
data?: FeishuMessageGetItem & {
items?: FeishuMessageGetItem[];
};
};
/** Send a direct message as a fallback when a reply target is unavailable. */
async function sendFallbackDirect(
client: FeishuCreateMessageClient,
@ -74,6 +102,50 @@ async function sendFallbackDirect(
return toFeishuSendResult(response, params.receiveId);
}
async function sendReplyOrFallbackDirect(
client: FeishuCreateMessageClient,
params: {
replyToMessageId?: string;
replyInThread?: boolean;
content: string;
msgType: string;
directParams: {
receiveId: string;
receiveIdType: "chat_id" | "email" | "open_id" | "union_id" | "user_id";
content: string;
msgType: string;
};
directErrorPrefix: string;
replyErrorPrefix: string;
},
): Promise<FeishuSendResult> {
if (!params.replyToMessageId) {
return sendFallbackDirect(client, params.directParams, params.directErrorPrefix);
}
let response: { code?: number; msg?: string; data?: { message_id?: string } };
try {
response = await client.im.message.reply({
path: { message_id: params.replyToMessageId },
data: {
content: params.content,
msg_type: params.msgType,
...(params.replyInThread ? { reply_in_thread: true } : {}),
},
});
} catch (err) {
if (!isWithdrawnReplyError(err)) {
throw err;
}
return sendFallbackDirect(client, params.directParams, params.directErrorPrefix);
}
if (shouldFallbackFromReplyTarget(response)) {
return sendFallbackDirect(client, params.directParams, params.directErrorPrefix);
}
assertFeishuMessageApiSuccess(response, params.replyErrorPrefix);
return toFeishuSendResult(response, params.directParams.receiveId);
}
function parseInteractiveCardContent(parsed: unknown): string {
if (!parsed || typeof parsed !== "object") {
return "[Interactive Card]";
@ -166,36 +238,7 @@ export async function getMessageFeishu(params: {
try {
const response = (await client.im.message.get({
path: { message_id: messageId },
})) as {
code?: number;
msg?: string;
data?: {
items?: Array<{
message_id?: string;
chat_id?: string;
chat_type?: FeishuChatType;
msg_type?: string;
body?: { content?: string };
sender?: {
id?: string;
id_type?: string;
sender_type?: string;
};
create_time?: string;
}>;
message_id?: string;
chat_id?: string;
chat_type?: FeishuChatType;
msg_type?: string;
body?: { content?: string };
sender?: {
id?: string;
id_type?: string;
sender_type?: string;
};
create_time?: string;
};
};
})) as FeishuGetMessageResponse;
if (response.code !== 0) {
return null;
@ -290,32 +333,15 @@ export async function sendMessageFeishu(
const { content, msgType } = buildFeishuPostMessagePayload({ messageText });
const directParams = { receiveId, receiveIdType, content, msgType };
if (replyToMessageId) {
let response: { code?: number; msg?: string; data?: { message_id?: string } };
try {
response = await client.im.message.reply({
path: { message_id: replyToMessageId },
data: {
content,
msg_type: msgType,
...(replyInThread ? { reply_in_thread: true } : {}),
},
});
} catch (err) {
if (!isWithdrawnReplyError(err)) {
throw err;
}
return sendFallbackDirect(client, directParams, "Feishu send failed");
}
if (shouldFallbackFromReplyTarget(response)) {
return sendFallbackDirect(client, directParams, "Feishu send failed");
}
assertFeishuMessageApiSuccess(response, "Feishu reply failed");
return toFeishuSendResult(response, receiveId);
}
return sendFallbackDirect(client, directParams, "Feishu send failed");
return sendReplyOrFallbackDirect(client, {
replyToMessageId,
replyInThread,
content,
msgType,
directParams,
directErrorPrefix: "Feishu send failed",
replyErrorPrefix: "Feishu reply failed",
});
}
export type SendFeishuCardParams = {
@ -334,32 +360,15 @@ export async function sendCardFeishu(params: SendFeishuCardParams): Promise<Feis
const content = JSON.stringify(card);
const directParams = { receiveId, receiveIdType, content, msgType: "interactive" };
if (replyToMessageId) {
let response: { code?: number; msg?: string; data?: { message_id?: string } };
try {
response = await client.im.message.reply({
path: { message_id: replyToMessageId },
data: {
content,
msg_type: "interactive",
...(replyInThread ? { reply_in_thread: true } : {}),
},
});
} catch (err) {
if (!isWithdrawnReplyError(err)) {
throw err;
}
return sendFallbackDirect(client, directParams, "Feishu card send failed");
}
if (shouldFallbackFromReplyTarget(response)) {
return sendFallbackDirect(client, directParams, "Feishu card send failed");
}
assertFeishuMessageApiSuccess(response, "Feishu card reply failed");
return toFeishuSendResult(response, receiveId);
}
return sendFallbackDirect(client, directParams, "Feishu card send failed");
return sendReplyOrFallbackDirect(client, {
replyToMessageId,
replyInThread,
content,
msgType: "interactive",
directParams,
directErrorPrefix: "Feishu card send failed",
replyErrorPrefix: "Feishu card reply failed",
});
}
export async function updateCardFeishu(params: {

View File

@ -144,6 +144,13 @@ describe("extractGeminiCliCredentials", () => {
}
}
function expectFakeCliCredentials(result: unknown) {
expect(result).toEqual({
clientId: FAKE_CLIENT_ID,
clientSecret: FAKE_CLIENT_SECRET,
});
}
beforeEach(async () => {
vi.clearAllMocks();
originalPath = process.env.PATH;
@ -169,10 +176,7 @@ describe("extractGeminiCliCredentials", () => {
clearCredentialsCache();
const result = extractGeminiCliCredentials();
expect(result).toEqual({
clientId: FAKE_CLIENT_ID,
clientSecret: FAKE_CLIENT_SECRET,
});
expectFakeCliCredentials(result);
});
it("extracts credentials when PATH entry is an npm global shim", async () => {
@ -182,10 +186,7 @@ describe("extractGeminiCliCredentials", () => {
clearCredentialsCache();
const result = extractGeminiCliCredentials();
expect(result).toEqual({
clientId: FAKE_CLIENT_ID,
clientSecret: FAKE_CLIENT_SECRET,
});
expectFakeCliCredentials(result);
});
it("returns null when oauth2.js cannot be found", async () => {
@ -274,16 +275,16 @@ describe("loginGeminiCliOAuth", () => {
});
}
async function runRemoteLoginWithCapturedAuthUrl(
loginGeminiCliOAuth: (options: {
isRemote: boolean;
openUrl: () => Promise<void>;
log: (msg: string) => void;
note: () => Promise<void>;
prompt: () => Promise<string>;
progress: { update: () => void; stop: () => void };
}) => Promise<{ projectId: string }>,
) {
type LoginGeminiCliOAuthFn = (options: {
isRemote: boolean;
openUrl: () => Promise<void>;
log: (msg: string) => void;
note: () => Promise<void>;
prompt: () => Promise<string>;
progress: { update: () => void; stop: () => void };
}) => Promise<{ projectId: string }>;
async function runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth: LoginGeminiCliOAuthFn) {
let authUrl = "";
const result = await loginGeminiCliOAuth({
isRemote: true,
@ -304,6 +305,14 @@ describe("loginGeminiCliOAuth", () => {
return { result, authUrl };
}
async function runRemoteLoginExpectingProjectId(
loginGeminiCliOAuth: LoginGeminiCliOAuthFn,
projectId: string,
) {
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth);
expect(result.projectId).toBe(projectId);
}
let envSnapshot: Partial<Record<(typeof ENV_KEYS)[number], string>>;
beforeEach(() => {
envSnapshot = Object.fromEntries(ENV_KEYS.map((key) => [key, process.env[key]]));
@ -357,9 +366,7 @@ describe("loginGeminiCliOAuth", () => {
vi.stubGlobal("fetch", fetchMock);
const { loginGeminiCliOAuth } = await import("./oauth.js");
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth);
expect(result.projectId).toBe("daily-project");
await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "daily-project");
const loadRequests = requests.filter((request) =>
request.url.includes("v1internal:loadCodeAssist"),
);
@ -414,9 +421,7 @@ describe("loginGeminiCliOAuth", () => {
vi.stubGlobal("fetch", fetchMock);
const { loginGeminiCliOAuth } = await import("./oauth.js");
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth);
expect(result.projectId).toBe("env-project");
await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "env-project");
expect(requests.filter((url) => url.includes("v1internal:loadCodeAssist"))).toHaveLength(3);
expect(requests.some((url) => url.includes("v1internal:onboardUser"))).toBe(false);
});

View File

@ -7,6 +7,9 @@
"dependencies": {
"google-auth-library": "^10.6.1"
},
"devDependencies": {
"openclaw": "workspace:*"
},
"peerDependencies": {
"openclaw": ">=2026.3.11"
},

View File

@ -13,6 +13,21 @@ const account = {
config: {},
} as ResolvedGoogleChatAccount;
function stubSuccessfulSend(name: string) {
const fetchMock = vi
.fn()
.mockResolvedValue(new Response(JSON.stringify({ name }), { status: 200 }));
vi.stubGlobal("fetch", fetchMock);
return fetchMock;
}
async function expectDownloadToRejectForResponse(response: Response) {
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response));
await expect(
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
).rejects.toThrow(/max bytes/i);
}
describe("downloadGoogleChatMedia", () => {
afterEach(() => {
vi.unstubAllGlobals();
@ -29,11 +44,7 @@ describe("downloadGoogleChatMedia", () => {
status: 200,
headers: { "content-length": "50", "content-type": "application/octet-stream" },
});
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response));
await expect(
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
).rejects.toThrow(/max bytes/i);
await expectDownloadToRejectForResponse(response);
});
it("rejects when streamed payload exceeds max bytes", async () => {
@ -52,11 +63,7 @@ describe("downloadGoogleChatMedia", () => {
status: 200,
headers: { "content-type": "application/octet-stream" },
});
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response));
await expect(
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
).rejects.toThrow(/max bytes/i);
await expectDownloadToRejectForResponse(response);
});
});
@ -66,12 +73,7 @@ describe("sendGoogleChatMessage", () => {
});
it("adds messageReplyOption when sending to an existing thread", async () => {
const fetchMock = vi
.fn()
.mockResolvedValue(
new Response(JSON.stringify({ name: "spaces/AAA/messages/123" }), { status: 200 }),
);
vi.stubGlobal("fetch", fetchMock);
const fetchMock = stubSuccessfulSend("spaces/AAA/messages/123");
await sendGoogleChatMessage({
account,
@ -89,12 +91,7 @@ describe("sendGoogleChatMessage", () => {
});
it("does not set messageReplyOption for non-thread sends", async () => {
const fetchMock = vi
.fn()
.mockResolvedValue(
new Response(JSON.stringify({ name: "spaces/AAA/messages/124" }), { status: 200 }),
);
vi.stubGlobal("fetch", fetchMock);
const fetchMock = stubSuccessfulSend("spaces/AAA/messages/124");
await sendGoogleChatMessage({
account,

View File

@ -14,70 +14,24 @@ const headersToObject = (headers?: HeadersInit): Record<string, string> =>
? Object.fromEntries(headers)
: headers || {};
async function fetchJson<T>(
account: ResolvedGoogleChatAccount,
url: string,
init: RequestInit,
): Promise<T> {
const token = await getGoogleChatAccessToken(account);
const { response: res, release } = await fetchWithSsrFGuard({
async function withGoogleChatResponse<T>(params: {
account: ResolvedGoogleChatAccount;
url: string;
init?: RequestInit;
auditContext: string;
errorPrefix?: string;
handleResponse: (response: Response) => Promise<T>;
}): Promise<T> {
const {
account,
url,
init: {
...init,
headers: {
...headersToObject(init.headers),
Authorization: `Bearer ${token}`,
"Content-Type": "application/json",
},
},
auditContext: "googlechat.api.json",
});
try {
if (!res.ok) {
const text = await res.text().catch(() => "");
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
}
return (await res.json()) as T;
} finally {
await release();
}
}
async function fetchOk(
account: ResolvedGoogleChatAccount,
url: string,
init: RequestInit,
): Promise<void> {
init,
auditContext,
errorPrefix = "Google Chat API",
handleResponse,
} = params;
const token = await getGoogleChatAccessToken(account);
const { response: res, release } = await fetchWithSsrFGuard({
url,
init: {
...init,
headers: {
...headersToObject(init.headers),
Authorization: `Bearer ${token}`,
},
},
auditContext: "googlechat.api.ok",
});
try {
if (!res.ok) {
const text = await res.text().catch(() => "");
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
}
} finally {
await release();
}
}
async function fetchBuffer(
account: ResolvedGoogleChatAccount,
url: string,
init?: RequestInit,
options?: { maxBytes?: number },
): Promise<{ buffer: Buffer; contentType?: string }> {
const token = await getGoogleChatAccessToken(account);
const { response: res, release } = await fetchWithSsrFGuard({
const { response, release } = await fetchWithSsrFGuard({
url,
init: {
...init,
@ -86,52 +40,103 @@ async function fetchBuffer(
Authorization: `Bearer ${token}`,
},
},
auditContext: "googlechat.api.buffer",
auditContext,
});
try {
if (!res.ok) {
const text = await res.text().catch(() => "");
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
if (!response.ok) {
const text = await response.text().catch(() => "");
throw new Error(`${errorPrefix} ${response.status}: ${text || response.statusText}`);
}
const maxBytes = options?.maxBytes;
const lengthHeader = res.headers.get("content-length");
if (maxBytes && lengthHeader) {
const length = Number(lengthHeader);
if (Number.isFinite(length) && length > maxBytes) {
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
}
if (!maxBytes || !res.body) {
const buffer = Buffer.from(await res.arrayBuffer());
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
}
const reader = res.body.getReader();
const chunks: Buffer[] = [];
let total = 0;
while (true) {
const { done, value } = await reader.read();
if (done) {
break;
}
if (!value) {
continue;
}
total += value.length;
if (total > maxBytes) {
await reader.cancel();
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
chunks.push(Buffer.from(value));
}
const buffer = Buffer.concat(chunks, total);
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
return await handleResponse(response);
} finally {
await release();
}
}
async function fetchJson<T>(
account: ResolvedGoogleChatAccount,
url: string,
init: RequestInit,
): Promise<T> {
return await withGoogleChatResponse({
account,
url,
init: {
...init,
headers: {
...headersToObject(init.headers),
"Content-Type": "application/json",
},
},
auditContext: "googlechat.api.json",
handleResponse: async (response) => (await response.json()) as T,
});
}
async function fetchOk(
account: ResolvedGoogleChatAccount,
url: string,
init: RequestInit,
): Promise<void> {
await withGoogleChatResponse({
account,
url,
init,
auditContext: "googlechat.api.ok",
handleResponse: async () => undefined,
});
}
async function fetchBuffer(
account: ResolvedGoogleChatAccount,
url: string,
init?: RequestInit,
options?: { maxBytes?: number },
): Promise<{ buffer: Buffer; contentType?: string }> {
return await withGoogleChatResponse({
account,
url,
init,
auditContext: "googlechat.api.buffer",
handleResponse: async (res) => {
const maxBytes = options?.maxBytes;
const lengthHeader = res.headers.get("content-length");
if (maxBytes && lengthHeader) {
const length = Number(lengthHeader);
if (Number.isFinite(length) && length > maxBytes) {
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
}
if (!maxBytes || !res.body) {
const buffer = Buffer.from(await res.arrayBuffer());
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
}
const reader = res.body.getReader();
const chunks: Buffer[] = [];
let total = 0;
while (true) {
const { done, value } = await reader.read();
if (done) {
break;
}
if (!value) {
continue;
}
total += value.length;
if (total > maxBytes) {
await reader.cancel();
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
chunks.push(Buffer.from(value));
}
const buffer = Buffer.concat(chunks, total);
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
},
});
}
export async function sendGoogleChatMessage(params: {
account: ResolvedGoogleChatAccount;
space: string;
@ -208,34 +213,29 @@ export async function uploadGoogleChatAttachment(params: {
Buffer.from(footer, "utf8"),
]);
const token = await getGoogleChatAccessToken(account);
const url = `${CHAT_UPLOAD_BASE}/${space}/attachments:upload?uploadType=multipart`;
const { response: res, release } = await fetchWithSsrFGuard({
const payload = await withGoogleChatResponse<{
attachmentDataRef?: { attachmentUploadToken?: string };
}>({
account,
url,
init: {
method: "POST",
headers: {
Authorization: `Bearer ${token}`,
"Content-Type": `multipart/related; boundary=${boundary}`,
},
body,
},
auditContext: "googlechat.upload",
errorPrefix: "Google Chat upload",
handleResponse: async (response) =>
(await response.json()) as {
attachmentDataRef?: { attachmentUploadToken?: string };
},
});
try {
if (!res.ok) {
const text = await res.text().catch(() => "");
throw new Error(`Google Chat upload ${res.status}: ${text || res.statusText}`);
}
const payload = (await res.json()) as {
attachmentDataRef?: { attachmentUploadToken?: string };
};
return {
attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken,
};
} finally {
await release();
}
return {
attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken,
};
}
export async function downloadGoogleChatMedia(params: {

View File

@ -1,6 +1,10 @@
import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/googlechat";
import { afterEach, describe, expect, it, vi } from "vitest";
import { createStartAccountContext } from "../../test-utils/start-account-context.js";
import {
abortStartedAccount,
expectPendingUntilAbort,
startAccountAndTrackLifecycle,
} from "../../test-utils/start-account-lifecycle.js";
import type { ResolvedGoogleChatAccount } from "./accounts.js";
const hoisted = vi.hoisted(() => ({
@ -39,29 +43,25 @@ describe("googlechatPlugin gateway.startAccount", () => {
},
};
const patches: ChannelAccountSnapshot[] = [];
const abort = new AbortController();
const task = googlechatPlugin.gateway!.startAccount!(
createStartAccountContext({
account,
abortSignal: abort.signal,
statusPatchSink: (next) => patches.push({ ...next }),
}),
);
let settled = false;
void task.then(() => {
settled = true;
const { abort, patches, task, isSettled } = startAccountAndTrackLifecycle({
startAccount: googlechatPlugin.gateway!.startAccount!,
account,
});
await vi.waitFor(() => {
expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce();
await expectPendingUntilAbort({
waitForStarted: () =>
vi.waitFor(() => {
expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce();
}),
isSettled,
abort,
task,
assertBeforeAbort: () => {
expect(unregister).not.toHaveBeenCalled();
},
assertAfterAbort: () => {
expect(unregister).toHaveBeenCalledOnce();
},
});
expect(settled).toBe(false);
expect(unregister).not.toHaveBeenCalled();
abort.abort();
await task;
expect(unregister).toHaveBeenCalledOnce();
expect(patches.some((entry) => entry.running === true)).toBe(true);
expect(patches.some((entry) => entry.running === false)).toBe(true);
});

View File

@ -117,6 +117,34 @@ function registerTwoTargets() {
};
}
async function dispatchWebhookRequest(req: IncomingMessage) {
const res = createMockServerResponse();
const handled = await handleGoogleChatWebhookRequest(req, res);
expect(handled).toBe(true);
return res;
}
async function expectVerifiedRoute(params: {
request: IncomingMessage;
expectedStatus: number;
sinkA: ReturnType<typeof vi.fn>;
sinkB: ReturnType<typeof vi.fn>;
expectedSink: "none" | "A" | "B";
}) {
const res = await dispatchWebhookRequest(params.request);
expect(res.statusCode).toBe(params.expectedStatus);
const expectedCounts =
params.expectedSink === "A" ? [1, 0] : params.expectedSink === "B" ? [0, 1] : [0, 0];
expect(params.sinkA).toHaveBeenCalledTimes(expectedCounts[0]);
expect(params.sinkB).toHaveBeenCalledTimes(expectedCounts[1]);
}
function mockSecondVerifierSuccess() {
vi.mocked(verifyGoogleChatRequest)
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
.mockResolvedValueOnce({ ok: true });
}
describe("Google Chat webhook routing", () => {
afterEach(() => {
setActivePluginRegistry(createEmptyPluginRegistry());
@ -165,45 +193,37 @@ describe("Google Chat webhook routing", () => {
const { sinkA, sinkB, unregister } = registerTwoTargets();
try {
const res = createMockServerResponse();
const handled = await handleGoogleChatWebhookRequest(
createWebhookRequest({
await expectVerifiedRoute({
request: createWebhookRequest({
authorization: "Bearer test-token",
payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/AAA" } },
}),
res,
);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).not.toHaveBeenCalled();
expectedStatus: 401,
sinkA,
sinkB,
expectedSink: "none",
});
} finally {
unregister();
}
});
it("routes to the single verified target when earlier targets fail verification", async () => {
vi.mocked(verifyGoogleChatRequest)
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
.mockResolvedValueOnce({ ok: true });
mockSecondVerifierSuccess();
const { sinkA, sinkB, unregister } = registerTwoTargets();
try {
const res = createMockServerResponse();
const handled = await handleGoogleChatWebhookRequest(
createWebhookRequest({
await expectVerifiedRoute({
request: createWebhookRequest({
authorization: "Bearer test-token",
payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/BBB" } },
}),
res,
);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).toHaveBeenCalledTimes(1);
expectedStatus: 200,
sinkA,
sinkB,
expectedSink: "B",
});
} finally {
unregister();
}
@ -218,10 +238,7 @@ describe("Google Chat webhook routing", () => {
authorization: "Bearer invalid-token",
});
const onSpy = vi.spyOn(req, "on");
const res = createMockServerResponse();
const handled = await handleGoogleChatWebhookRequest(req, res);
expect(handled).toBe(true);
const res = await dispatchWebhookRequest(req);
expect(res.statusCode).toBe(401);
expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function));
} finally {
@ -230,15 +247,12 @@ describe("Google Chat webhook routing", () => {
});
it("supports add-on requests that provide systemIdToken in the body", async () => {
vi.mocked(verifyGoogleChatRequest)
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
.mockResolvedValueOnce({ ok: true });
mockSecondVerifierSuccess();
const { sinkA, sinkB, unregister } = registerTwoTargets();
try {
const res = createMockServerResponse();
const handled = await handleGoogleChatWebhookRequest(
createWebhookRequest({
await expectVerifiedRoute({
request: createWebhookRequest({
payload: {
commonEventObject: { hostApp: "CHAT" },
authorizationEventObject: { systemIdToken: "addon-token" },
@ -252,13 +266,11 @@ describe("Google Chat webhook routing", () => {
},
},
}),
res,
);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).toHaveBeenCalledTimes(1);
expectedStatus: 200,
sinkA,
sinkB,
expectedSink: "B",
});
} finally {
unregister();
}

Some files were not shown because too many files have changed in this diff Show More