Merge branch 'main' into ui/dashboard-v2.1
This commit is contained in:
commit
9505224316
@ -1,5 +1,11 @@
|
||||
.git
|
||||
.worktrees
|
||||
|
||||
# Sensitive files – docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN
|
||||
# into the project root; keep it out of the build context.
|
||||
.env
|
||||
.env.*
|
||||
|
||||
.bun-cache
|
||||
.bun
|
||||
.tmp
|
||||
|
||||
4
.github/actions/setup-node-env/action.yml
vendored
4
.github/actions/setup-node-env/action.yml
vendored
@ -49,7 +49,7 @@ runs:
|
||||
exit 1
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: ${{ inputs.node-version }}
|
||||
check-latest: false
|
||||
@ -63,7 +63,7 @@ runs:
|
||||
|
||||
- name: Setup Bun
|
||||
if: inputs.install-bun == 'true'
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@v2.1.3
|
||||
with:
|
||||
bun-version: "1.3.9"
|
||||
|
||||
|
||||
@ -61,14 +61,14 @@ runs:
|
||||
- name: Restore pnpm store cache (exact key only)
|
||||
# PRs that request sticky disks still need a safe cache restore path.
|
||||
if: inputs.use-actions-cache == 'true' && (inputs.use-sticky-disk != 'true' || github.event_name == 'pull_request') && inputs.use-restore-keys != 'true'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ${{ steps.pnpm-store.outputs.path }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ inputs.cache-key-suffix }}-${{ hashFiles('pnpm-lock.yaml') }}
|
||||
|
||||
- name: Restore pnpm store cache (with fallback keys)
|
||||
if: inputs.use-actions-cache == 'true' && (inputs.use-sticky-disk != 'true' || github.event_name == 'pull_request') && inputs.use-restore-keys == 'true'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ${{ steps.pnpm-store.outputs.path }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ inputs.cache-key-suffix }}-${{ hashFiles('pnpm-lock.yaml') }}
|
||||
|
||||
11
.github/workflows/auto-response.yml
vendored
11
.github/workflows/auto-response.yml
vendored
@ -5,9 +5,12 @@ on:
|
||||
types: [opened, edited, labeled]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_target:
|
||||
pull_request_target: # zizmor: ignore[dangerous-triggers] maintainer-owned label automation; no untrusted checkout or code execution
|
||||
types: [labeled]
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
@ -17,20 +20,20 @@ jobs:
|
||||
pull-requests: write
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token
|
||||
continue-on-error: true
|
||||
with:
|
||||
app-id: "2729701"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token-fallback
|
||||
if: steps.app-token.outcome == 'failure'
|
||||
with:
|
||||
app-id: "2971289"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
|
||||
- name: Handle labeled items
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
script: |
|
||||
|
||||
138
.github/workflows/ci.yml
vendored
138
.github/workflows/ci.yml
vendored
@ -7,7 +7,10 @@ on:
|
||||
|
||||
concurrency:
|
||||
group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
jobs:
|
||||
# Detect docs-only changes to skip heavy jobs (test, build, Windows, macOS, Android).
|
||||
@ -19,7 +22,7 @@ jobs:
|
||||
docs_changed: ${{ steps.check.outputs.docs_changed }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-tags: false
|
||||
@ -35,9 +38,8 @@ jobs:
|
||||
id: check
|
||||
uses: ./.github/actions/detect-docs-changes
|
||||
|
||||
# Detect which heavy areas are touched so PRs can skip unrelated expensive jobs.
|
||||
# Push to main keeps broad coverage, but this job still needs to run so
|
||||
# downstream jobs that list it in `needs` are not skipped.
|
||||
# Detect which heavy areas are touched so CI can skip unrelated expensive jobs.
|
||||
# Fail-safe: if detection fails, downstream jobs run.
|
||||
changed-scope:
|
||||
needs: [docs-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true'
|
||||
@ -50,7 +52,7 @@ jobs:
|
||||
run_windows: ${{ steps.scope.outputs.run_windows }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-tags: false
|
||||
@ -79,11 +81,11 @@ jobs:
|
||||
# Build dist once for Node-relevant changes and share it with downstream jobs.
|
||||
build-artifacts:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -98,13 +100,13 @@ jobs:
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "false"
|
||||
use-sticky-disk: "true"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Build dist
|
||||
run: pnpm build
|
||||
|
||||
- name: Upload dist artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: dist-build
|
||||
path: dist/
|
||||
@ -117,7 +119,7 @@ jobs:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -125,10 +127,10 @@ jobs:
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "false"
|
||||
use-sticky-disk: "true"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Download dist artifact
|
||||
uses: actions/download-artifact@v4
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
name: dist-build
|
||||
path: dist/
|
||||
@ -138,7 +140,7 @@ jobs:
|
||||
|
||||
checks:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@ -146,6 +148,13 @@ jobs:
|
||||
include:
|
||||
- runtime: node
|
||||
task: test
|
||||
shard_index: 1
|
||||
shard_count: 2
|
||||
command: pnpm canvas:a2ui:bundle && pnpm test
|
||||
- runtime: node
|
||||
task: test
|
||||
shard_index: 2
|
||||
shard_count: 2
|
||||
command: pnpm canvas:a2ui:bundle && pnpm test
|
||||
- runtime: node
|
||||
task: extensions
|
||||
@ -157,44 +166,51 @@ jobs:
|
||||
task: test
|
||||
command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts
|
||||
steps:
|
||||
- name: Skip bun lane on push
|
||||
if: github.event_name == 'push' && matrix.runtime == 'bun'
|
||||
run: echo "Skipping bun test lane on push events."
|
||||
- name: Skip bun lane on pull requests
|
||||
if: github.event_name == 'pull_request' && matrix.runtime == 'bun'
|
||||
run: echo "Skipping Bun compatibility lane on pull requests."
|
||||
|
||||
- name: Checkout
|
||||
if: github.event_name != 'push' || matrix.runtime != 'bun'
|
||||
uses: actions/checkout@v4
|
||||
if: github.event_name != 'pull_request' || matrix.runtime != 'bun'
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
- name: Setup Node environment
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'push'
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "${{ matrix.runtime == 'bun' }}"
|
||||
use-sticky-disk: "true"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Configure Node test resources
|
||||
if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
|
||||
if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
|
||||
env:
|
||||
SHARD_COUNT: ${{ matrix.shard_count || '' }}
|
||||
SHARD_INDEX: ${{ matrix.shard_index || '' }}
|
||||
run: |
|
||||
# `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes.
|
||||
# Default heap limits have been too low on Linux CI (V8 OOM near 4GB).
|
||||
echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV"
|
||||
echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV"
|
||||
if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then
|
||||
echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV"
|
||||
echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV"
|
||||
fi
|
||||
|
||||
- name: Run ${{ matrix.task }} (${{ matrix.runtime }})
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'push'
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
|
||||
run: ${{ matrix.command }}
|
||||
|
||||
# Types, lint, and format check.
|
||||
check:
|
||||
name: "check"
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -202,7 +218,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "false"
|
||||
use-sticky-disk: "true"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Check types and lint and oxfmt
|
||||
run: pnpm check
|
||||
@ -220,7 +236,7 @@ jobs:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -228,7 +244,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "false"
|
||||
use-sticky-disk: "true"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Check docs
|
||||
run: pnpm check:docs
|
||||
@ -236,11 +252,11 @@ jobs:
|
||||
compat-node22:
|
||||
name: "compat-node22"
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -250,7 +266,7 @@ jobs:
|
||||
node-version: "22.x"
|
||||
cache-key-suffix: "node22"
|
||||
install-bun: "false"
|
||||
use-sticky-disk: "true"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Configure Node 22 test resources
|
||||
run: |
|
||||
@ -269,16 +285,16 @@ jobs:
|
||||
|
||||
skills-python:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v5
|
||||
uses: actions/setup-python@v6
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
@ -297,7 +313,7 @@ jobs:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -316,7 +332,7 @@ jobs:
|
||||
|
||||
- name: Setup Python
|
||||
id: setup-python
|
||||
uses: actions/setup-python@v5
|
||||
uses: actions/setup-python@v6
|
||||
with:
|
||||
python-version: "3.12"
|
||||
cache: "pip"
|
||||
@ -326,7 +342,7 @@ jobs:
|
||||
.github/workflows/ci.yml
|
||||
|
||||
- name: Restore pre-commit cache
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cache/pre-commit
|
||||
key: pre-commit-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
@ -362,7 +378,7 @@ jobs:
|
||||
|
||||
checks-windows:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true'
|
||||
runs-on: blacksmith-32vcpu-windows-2025
|
||||
timeout-minutes: 45
|
||||
env:
|
||||
@ -409,7 +425,7 @@ jobs:
|
||||
command: pnpm test
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -433,7 +449,7 @@ jobs:
|
||||
}
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: 24.x
|
||||
check-latest: false
|
||||
@ -495,7 +511,7 @@ jobs:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -531,7 +547,7 @@ jobs:
|
||||
swiftformat --lint apps/macos/Sources --config .swiftformat
|
||||
|
||||
- name: Cache SwiftPM
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/Library/Caches/org.swift.swiftpm
|
||||
key: ${{ runner.os }}-swiftpm-${{ hashFiles('apps/macos/Package.resolved') }}
|
||||
@ -567,7 +583,7 @@ jobs:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -724,7 +740,7 @@ jobs:
|
||||
|
||||
android:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@ -736,31 +752,45 @@ jobs:
|
||||
command: ./gradlew --no-daemon :app:assembleDebug
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
- name: Setup Java
|
||||
uses: actions/setup-java@v4
|
||||
uses: actions/setup-java@v5
|
||||
with:
|
||||
distribution: temurin
|
||||
# setup-android's sdkmanager currently crashes on JDK 21 in CI.
|
||||
# Keep sdkmanager on the stable JDK path for Linux CI runners.
|
||||
java-version: 17
|
||||
|
||||
- name: Setup Android SDK
|
||||
uses: android-actions/setup-android@v3
|
||||
with:
|
||||
accept-android-sdk-licenses: false
|
||||
- name: Setup Android SDK cmdline-tools
|
||||
run: |
|
||||
set -euo pipefail
|
||||
ANDROID_SDK_ROOT="$HOME/.android-sdk"
|
||||
CMDLINE_TOOLS_VERSION="12266719"
|
||||
ARCHIVE="commandlinetools-linux-${CMDLINE_TOOLS_VERSION}_latest.zip"
|
||||
URL="https://dl.google.com/android/repository/${ARCHIVE}"
|
||||
|
||||
mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools"
|
||||
curl -fsSL "$URL" -o "/tmp/${ARCHIVE}"
|
||||
rm -rf "$ANDROID_SDK_ROOT/cmdline-tools/latest"
|
||||
unzip -q "/tmp/${ARCHIVE}" -d "$ANDROID_SDK_ROOT/cmdline-tools"
|
||||
mv "$ANDROID_SDK_ROOT/cmdline-tools/cmdline-tools" "$ANDROID_SDK_ROOT/cmdline-tools/latest"
|
||||
|
||||
echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV"
|
||||
echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV"
|
||||
echo "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin" >> "$GITHUB_PATH"
|
||||
echo "$ANDROID_SDK_ROOT/platform-tools" >> "$GITHUB_PATH"
|
||||
|
||||
- name: Setup Gradle
|
||||
uses: gradle/actions/setup-gradle@v4
|
||||
uses: gradle/actions/setup-gradle@v5
|
||||
with:
|
||||
gradle-version: 8.11.1
|
||||
|
||||
- name: Install Android SDK packages
|
||||
run: |
|
||||
yes | sdkmanager --licenses >/dev/null
|
||||
sdkmanager --install \
|
||||
yes | sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --licenses >/dev/null
|
||||
sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --install \
|
||||
"platform-tools" \
|
||||
"platforms;android-36" \
|
||||
"build-tools;36.0.0"
|
||||
|
||||
11
.github/workflows/codeql.yml
vendored
11
.github/workflows/codeql.yml
vendored
@ -7,6 +7,9 @@ concurrency:
|
||||
group: codeql-${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
permissions:
|
||||
actions: read
|
||||
contents: read
|
||||
@ -67,7 +70,7 @@ jobs:
|
||||
config_file: ""
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
@ -76,17 +79,17 @@ jobs:
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "false"
|
||||
use-sticky-disk: "true"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Setup Python
|
||||
if: matrix.needs_python
|
||||
uses: actions/setup-python@v5
|
||||
uses: actions/setup-python@v6
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Setup Java
|
||||
if: matrix.needs_java
|
||||
uses: actions/setup-java@v4
|
||||
uses: actions/setup-java@v5
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: "21"
|
||||
|
||||
17
.github/workflows/docker-release.yml
vendored
17
.github/workflows/docker-release.yml
vendored
@ -18,6 +18,7 @@ concurrency:
|
||||
cancel-in-progress: false
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
@ -33,13 +34,13 @@ jobs:
|
||||
slim-digest: ${{ steps.build-slim.outputs.digest }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Docker Builder
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.repository_owner }}
|
||||
@ -134,13 +135,13 @@ jobs:
|
||||
slim-digest: ${{ steps.build-slim.outputs.digest }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Docker Builder
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.repository_owner }}
|
||||
@ -233,10 +234,10 @@ jobs:
|
||||
needs: [build-amd64, build-arm64]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.repository_owner }}
|
||||
|
||||
9
.github/workflows/install-smoke.yml
vendored
9
.github/workflows/install-smoke.yml
vendored
@ -10,6 +10,9 @@ concurrency:
|
||||
group: install-smoke-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
jobs:
|
||||
docs-scope:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
@ -17,7 +20,7 @@ jobs:
|
||||
docs_only: ${{ steps.check.outputs.docs_only }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-tags: false
|
||||
@ -38,10 +41,10 @@ jobs:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout CLI
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Docker Builder
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
|
||||
# Blacksmith can fall back to the local docker driver, which rejects gha
|
||||
# cache export/import. Keep smoke builds driver-agnostic.
|
||||
|
||||
29
.github/workflows/labeler.yml
vendored
29
.github/workflows/labeler.yml
vendored
@ -1,7 +1,7 @@
|
||||
name: Labeler
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
pull_request_target: # zizmor: ignore[dangerous-triggers] maintainer-owned triage workflow; no untrusted checkout or PR code execution
|
||||
types: [opened, synchronize, reopened]
|
||||
issues:
|
||||
types: [opened]
|
||||
@ -16,6 +16,9 @@ on:
|
||||
required: false
|
||||
default: "50"
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
@ -25,25 +28,25 @@ jobs:
|
||||
pull-requests: write
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token
|
||||
continue-on-error: true
|
||||
with:
|
||||
app-id: "2729701"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token-fallback
|
||||
if: steps.app-token.outcome == 'failure'
|
||||
with:
|
||||
app-id: "2971289"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
|
||||
- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5
|
||||
- uses: actions/labeler@v6
|
||||
with:
|
||||
configuration-path: .github/labeler.yml
|
||||
repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
sync-labels: true
|
||||
- name: Apply PR size label
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
script: |
|
||||
@ -132,7 +135,7 @@ jobs:
|
||||
labels: [targetSizeLabel],
|
||||
});
|
||||
- name: Apply maintainer or trusted-contributor label
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
script: |
|
||||
@ -203,7 +206,7 @@ jobs:
|
||||
// });
|
||||
// }
|
||||
- name: Apply too-many-prs label
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
script: |
|
||||
@ -381,20 +384,20 @@ jobs:
|
||||
pull-requests: write
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token
|
||||
continue-on-error: true
|
||||
with:
|
||||
app-id: "2729701"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token-fallback
|
||||
if: steps.app-token.outcome == 'failure'
|
||||
with:
|
||||
app-id: "2971289"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
|
||||
- name: Backfill PR labels
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
script: |
|
||||
@ -629,20 +632,20 @@ jobs:
|
||||
issues: write
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token
|
||||
continue-on-error: true
|
||||
with:
|
||||
app-id: "2729701"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token-fallback
|
||||
if: steps.app-token.outcome == 'failure'
|
||||
with:
|
||||
app-id: "2971289"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
|
||||
- name: Apply maintainer or trusted-contributor label
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
script: |
|
||||
|
||||
3
.github/workflows/openclaw-npm-release.yml
vendored
3
.github/workflows/openclaw-npm-release.yml
vendored
@ -10,6 +10,7 @@ concurrency:
|
||||
cancel-in-progress: false
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
NODE_VERSION: "24.x"
|
||||
PNPM_VERSION: "10.23.0"
|
||||
|
||||
@ -22,7 +23,7 @@ jobs:
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
||||
7
.github/workflows/sandbox-common-smoke.yml
vendored
7
.github/workflows/sandbox-common-smoke.yml
vendored
@ -17,17 +17,20 @@ concurrency:
|
||||
group: sandbox-common-smoke-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
jobs:
|
||||
sandbox-common-smoke:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
- name: Set up Docker Builder
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
|
||||
- name: Build minimal sandbox base (USER sandbox)
|
||||
shell: bash
|
||||
|
||||
17
.github/workflows/stale.yml
vendored
17
.github/workflows/stale.yml
vendored
@ -5,6 +5,9 @@ on:
|
||||
- cron: "17 3 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
@ -14,13 +17,13 @@ jobs:
|
||||
pull-requests: write
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token
|
||||
continue-on-error: true
|
||||
with:
|
||||
app-id: "2729701"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token-fallback
|
||||
continue-on-error: true
|
||||
with:
|
||||
@ -29,7 +32,7 @@ jobs:
|
||||
- name: Mark stale issues and pull requests (primary)
|
||||
id: stale-primary
|
||||
continue-on-error: true
|
||||
uses: actions/stale@v9
|
||||
uses: actions/stale@v10
|
||||
with:
|
||||
repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
|
||||
days-before-issue-stale: 7
|
||||
@ -62,7 +65,7 @@ jobs:
|
||||
- name: Check stale state cache
|
||||
id: stale-state
|
||||
if: always()
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token-fallback.outputs.token || steps.app-token.outputs.token }}
|
||||
script: |
|
||||
@ -85,7 +88,7 @@ jobs:
|
||||
}
|
||||
- name: Mark stale issues and pull requests (fallback)
|
||||
if: (steps.stale-primary.outcome == 'failure' || steps.stale-state.outputs.has_state == 'true') && steps.app-token-fallback.outputs.token != ''
|
||||
uses: actions/stale@v9
|
||||
uses: actions/stale@v10
|
||||
with:
|
||||
repo-token: ${{ steps.app-token-fallback.outputs.token }}
|
||||
days-before-issue-stale: 7
|
||||
@ -121,13 +124,13 @@ jobs:
|
||||
issues: write
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
- uses: actions/create-github-app-token@v2
|
||||
id: app-token
|
||||
with:
|
||||
app-id: "2729701"
|
||||
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
|
||||
- name: Lock closed issues after 48h of no comments
|
||||
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
github-token: ${{ steps.app-token.outputs.token }}
|
||||
script: |
|
||||
|
||||
7
.github/workflows/workflow-sanity.yml
vendored
7
.github/workflows/workflow-sanity.yml
vendored
@ -9,12 +9,15 @@ concurrency:
|
||||
group: workflow-sanity-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
||||
jobs:
|
||||
no-tabs:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Fail on tabs in workflow files
|
||||
run: |
|
||||
@ -45,7 +48,7 @@ jobs:
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Install actionlint
|
||||
shell: bash
|
||||
|
||||
16
.jscpd.json
Normal file
16
.jscpd.json
Normal file
@ -0,0 +1,16 @@
|
||||
{
|
||||
"gitignore": true,
|
||||
"noSymlinks": true,
|
||||
"ignore": [
|
||||
"**/node_modules/**",
|
||||
"**/dist/**",
|
||||
"dist/**",
|
||||
"**/.git/**",
|
||||
"**/coverage/**",
|
||||
"**/build/**",
|
||||
"**/.build/**",
|
||||
"**/.artifacts/**",
|
||||
"docs/zh-CN/**",
|
||||
"**/CHANGELOG.md"
|
||||
]
|
||||
}
|
||||
@ -201,6 +201,14 @@
|
||||
## Agent-Specific Notes
|
||||
|
||||
- Vocabulary: "makeup" = "mac app".
|
||||
- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested.
|
||||
- Parallels macOS smoke playbook:
|
||||
- `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`.
|
||||
- Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed.
|
||||
- Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`.
|
||||
- For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green.
|
||||
- Don’t run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially.
|
||||
- Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading.
|
||||
- Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`.
|
||||
- When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`).
|
||||
- Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`.
|
||||
|
||||
14
CHANGELOG.md
14
CHANGELOG.md
@ -9,9 +9,12 @@ Docs: https://docs.openclaw.ai
|
||||
- Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus.
|
||||
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
|
||||
- iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman.
|
||||
- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome’s own setup guides.
|
||||
|
||||
### Fixes
|
||||
|
||||
- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata.
|
||||
- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang.
|
||||
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
|
||||
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding.
|
||||
- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus.
|
||||
@ -21,6 +24,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
|
||||
- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference.
|
||||
- Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97.
|
||||
- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart.
|
||||
- Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179.
|
||||
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
|
||||
- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec.
|
||||
@ -28,10 +32,18 @@ Docs: https://docs.openclaw.ai
|
||||
- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
|
||||
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
|
||||
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
|
||||
- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants.
|
||||
- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token.
|
||||
- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks.
|
||||
- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins.
|
||||
- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization.
|
||||
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
|
||||
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
|
||||
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
|
||||
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
|
||||
- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss.
|
||||
- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han.
|
||||
- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`.
|
||||
|
||||
## 2026.3.12
|
||||
|
||||
@ -44,6 +56,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi
|
||||
- Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff
|
||||
- Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc.
|
||||
- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc.
|
||||
|
||||
### Fixes
|
||||
|
||||
@ -117,6 +130,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn.
|
||||
- CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621.
|
||||
- Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte.
|
||||
- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh.
|
||||
|
||||
## 2026.3.11
|
||||
|
||||
|
||||
@ -45,8 +45,8 @@ enum ExecApprovalEvaluator {
|
||||
|
||||
let skillAllow: Bool
|
||||
if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty {
|
||||
let bins = await SkillBinsCache.shared.currentBins()
|
||||
skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) }
|
||||
let bins = await SkillBinsCache.shared.currentTrust()
|
||||
skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins)
|
||||
} else {
|
||||
skillAllow = false
|
||||
}
|
||||
@ -65,4 +65,26 @@ enum ExecApprovalEvaluator {
|
||||
allowlistMatch: allowlistSatisfied ? allowlistMatches.first : nil,
|
||||
skillAllow: skillAllow)
|
||||
}
|
||||
|
||||
static func isSkillAutoAllowed(
|
||||
_ resolutions: [ExecCommandResolution],
|
||||
trustedBinsByName: [String: Set<String>]) -> Bool
|
||||
{
|
||||
guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false }
|
||||
return resolutions.allSatisfy { resolution in
|
||||
guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName),
|
||||
let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath)
|
||||
else {
|
||||
return false
|
||||
}
|
||||
return trustedBinsByName[executableName]?.contains(resolvedPath) == true
|
||||
}
|
||||
}
|
||||
|
||||
static func _testIsSkillAutoAllowed(
|
||||
_ resolutions: [ExecCommandResolution],
|
||||
trustedBinsByName: [String: Set<String>]) -> Bool
|
||||
{
|
||||
self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -777,6 +777,7 @@ actor SkillBinsCache {
|
||||
static let shared = SkillBinsCache()
|
||||
|
||||
private var bins: Set<String> = []
|
||||
private var trustByName: [String: Set<String>] = [:]
|
||||
private var lastRefresh: Date?
|
||||
private let refreshInterval: TimeInterval = 90
|
||||
|
||||
@ -787,27 +788,90 @@ actor SkillBinsCache {
|
||||
return self.bins
|
||||
}
|
||||
|
||||
func currentTrust(force: Bool = false) async -> [String: Set<String>] {
|
||||
if force || self.isStale() {
|
||||
await self.refresh()
|
||||
}
|
||||
return self.trustByName
|
||||
}
|
||||
|
||||
func refresh() async {
|
||||
do {
|
||||
let report = try await GatewayConnection.shared.skillsStatus()
|
||||
var next = Set<String>()
|
||||
for skill in report.skills {
|
||||
for bin in skill.requirements.bins {
|
||||
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
if !trimmed.isEmpty { next.insert(trimmed) }
|
||||
}
|
||||
}
|
||||
self.bins = next
|
||||
let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths())
|
||||
self.bins = trust.names
|
||||
self.trustByName = trust.pathsByName
|
||||
self.lastRefresh = Date()
|
||||
} catch {
|
||||
if self.lastRefresh == nil {
|
||||
self.bins = []
|
||||
self.trustByName = [:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static func normalizeSkillBinName(_ value: String) -> String? {
|
||||
let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased()
|
||||
return trimmed.isEmpty ? nil : trimmed
|
||||
}
|
||||
|
||||
static func normalizeResolvedPath(_ value: String?) -> String? {
|
||||
let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
|
||||
guard !trimmed.isEmpty else { return nil }
|
||||
return URL(fileURLWithPath: trimmed).standardizedFileURL.path
|
||||
}
|
||||
|
||||
static func buildTrustIndex(
|
||||
report: SkillsStatusReport,
|
||||
searchPaths: [String]) -> SkillBinTrustIndex
|
||||
{
|
||||
var names = Set<String>()
|
||||
var pathsByName: [String: Set<String>] = [:]
|
||||
|
||||
for skill in report.skills {
|
||||
for bin in skill.requirements.bins {
|
||||
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { continue }
|
||||
names.insert(trimmed)
|
||||
|
||||
guard let name = self.normalizeSkillBinName(trimmed),
|
||||
let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths),
|
||||
let normalizedPath = self.normalizeResolvedPath(resolvedPath)
|
||||
else {
|
||||
continue
|
||||
}
|
||||
|
||||
var paths = pathsByName[name] ?? Set<String>()
|
||||
paths.insert(normalizedPath)
|
||||
pathsByName[name] = paths
|
||||
}
|
||||
}
|
||||
|
||||
return SkillBinTrustIndex(names: names, pathsByName: pathsByName)
|
||||
}
|
||||
|
||||
private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? {
|
||||
let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin
|
||||
if expanded.contains("/") || expanded.contains("\\") {
|
||||
return FileManager().isExecutableFile(atPath: expanded) ? expanded : nil
|
||||
}
|
||||
return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths)
|
||||
}
|
||||
|
||||
private func isStale() -> Bool {
|
||||
guard let lastRefresh else { return true }
|
||||
return Date().timeIntervalSince(lastRefresh) > self.refreshInterval
|
||||
}
|
||||
|
||||
static func _testBuildTrustIndex(
|
||||
report: SkillsStatusReport,
|
||||
searchPaths: [String]) -> SkillBinTrustIndex
|
||||
{
|
||||
self.buildTrustIndex(report: report, searchPaths: searchPaths)
|
||||
}
|
||||
}
|
||||
|
||||
struct SkillBinTrustIndex {
|
||||
let names: Set<String>
|
||||
let pathsByName: [String: Set<String>]
|
||||
}
|
||||
|
||||
@ -37,8 +37,7 @@ struct ExecCommandResolution {
|
||||
var resolutions: [ExecCommandResolution] = []
|
||||
resolutions.reserveCapacity(segments.count)
|
||||
for segment in segments {
|
||||
guard let token = self.parseFirstToken(segment),
|
||||
let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: env)
|
||||
guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env)
|
||||
else {
|
||||
return []
|
||||
}
|
||||
@ -88,6 +87,20 @@ struct ExecCommandResolution {
|
||||
cwd: cwd)
|
||||
}
|
||||
|
||||
private static func resolveShellSegmentExecutable(
|
||||
_ segment: String,
|
||||
cwd: String?,
|
||||
env: [String: String]?) -> ExecCommandResolution?
|
||||
{
|
||||
let tokens = self.tokenizeShellWords(segment)
|
||||
guard !tokens.isEmpty else { return nil }
|
||||
let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens)
|
||||
guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else {
|
||||
return nil
|
||||
}
|
||||
return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env)
|
||||
}
|
||||
|
||||
private static func parseFirstToken(_ command: String) -> String? {
|
||||
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { return nil }
|
||||
@ -102,6 +115,59 @@ struct ExecCommandResolution {
|
||||
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
|
||||
}
|
||||
|
||||
private static func tokenizeShellWords(_ command: String) -> [String] {
|
||||
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { return [] }
|
||||
|
||||
var tokens: [String] = []
|
||||
var current = ""
|
||||
var inSingle = false
|
||||
var inDouble = false
|
||||
var escaped = false
|
||||
|
||||
func appendCurrent() {
|
||||
guard !current.isEmpty else { return }
|
||||
tokens.append(current)
|
||||
current.removeAll(keepingCapacity: true)
|
||||
}
|
||||
|
||||
for ch in trimmed {
|
||||
if escaped {
|
||||
current.append(ch)
|
||||
escaped = false
|
||||
continue
|
||||
}
|
||||
|
||||
if ch == "\\", !inSingle {
|
||||
escaped = true
|
||||
continue
|
||||
}
|
||||
|
||||
if ch == "'", !inDouble {
|
||||
inSingle.toggle()
|
||||
continue
|
||||
}
|
||||
|
||||
if ch == "\"", !inSingle {
|
||||
inDouble.toggle()
|
||||
continue
|
||||
}
|
||||
|
||||
if ch.isWhitespace, !inSingle, !inDouble {
|
||||
appendCurrent()
|
||||
continue
|
||||
}
|
||||
|
||||
current.append(ch)
|
||||
}
|
||||
|
||||
if escaped {
|
||||
current.append("\\")
|
||||
}
|
||||
appendCurrent()
|
||||
return tokens
|
||||
}
|
||||
|
||||
private enum ShellTokenContext {
|
||||
case unquoted
|
||||
case doubleQuoted
|
||||
@ -148,8 +214,14 @@ struct ExecCommandResolution {
|
||||
while idx < chars.count {
|
||||
let ch = chars[idx]
|
||||
let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil
|
||||
let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle)
|
||||
|
||||
if escaped {
|
||||
if ch == "\n" {
|
||||
escaped = false
|
||||
idx += 1
|
||||
continue
|
||||
}
|
||||
current.append(ch)
|
||||
escaped = false
|
||||
idx += 1
|
||||
@ -157,6 +229,10 @@ struct ExecCommandResolution {
|
||||
}
|
||||
|
||||
if ch == "\\", !inSingle {
|
||||
if next == "\n" {
|
||||
idx += 2
|
||||
continue
|
||||
}
|
||||
current.append(ch)
|
||||
escaped = true
|
||||
idx += 1
|
||||
@ -177,7 +253,7 @@ struct ExecCommandResolution {
|
||||
continue
|
||||
}
|
||||
|
||||
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) {
|
||||
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) {
|
||||
// Fail closed on command/process substitution in allowlist mode,
|
||||
// including command substitution inside double-quoted shell strings.
|
||||
return nil
|
||||
@ -201,6 +277,25 @@ struct ExecCommandResolution {
|
||||
return segments
|
||||
}
|
||||
|
||||
private static func nextShellSignificantCharacter(
|
||||
chars: [Character],
|
||||
after idx: Int,
|
||||
inSingle: Bool) -> Character?
|
||||
{
|
||||
guard !inSingle else {
|
||||
return idx + 1 < chars.count ? chars[idx + 1] : nil
|
||||
}
|
||||
var cursor = idx + 1
|
||||
while cursor < chars.count {
|
||||
if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" {
|
||||
cursor += 2
|
||||
continue
|
||||
}
|
||||
return chars[cursor]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool {
|
||||
let context: ShellTokenContext = inDouble ? .doubleQuoted : .unquoted
|
||||
guard let rules = self.shellFailClosedRules[context] else {
|
||||
|
||||
@ -141,6 +141,26 @@ struct ExecAllowlistTests {
|
||||
#expect(resolutions.isEmpty)
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist fails closed on line-continued command substitution`() {
|
||||
let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.isEmpty)
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist fails closed on chained line-continued command substitution`() {
|
||||
let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.isEmpty)
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist fails closed on quoted backticks`() {
|
||||
let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
@ -208,6 +228,30 @@ struct ExecAllowlistTests {
|
||||
#expect(resolutions[1].executableName == "touch")
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() {
|
||||
let command = ["/bin/sh", "-lc", "env /usr/bin/touch /tmp/openclaw-allowlist-test"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "env /usr/bin/touch /tmp/openclaw-allowlist-test",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.count == 1)
|
||||
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
|
||||
#expect(resolutions[0].executableName == "touch")
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist unwraps env assignments inside shell segments`() {
|
||||
let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.count == 1)
|
||||
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
|
||||
#expect(resolutions[0].executableName == "touch")
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist unwraps env to effective direct executable`() {
|
||||
let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
|
||||
@ -0,0 +1,90 @@
|
||||
import Foundation
|
||||
import Testing
|
||||
@testable import OpenClaw
|
||||
|
||||
struct ExecSkillBinTrustTests {
|
||||
@Test func `build trust index resolves skill bin paths`() throws {
|
||||
let fixture = try Self.makeExecutable(named: "jq")
|
||||
defer { try? FileManager.default.removeItem(at: fixture.root) }
|
||||
|
||||
let trust = SkillBinsCache._testBuildTrustIndex(
|
||||
report: Self.makeReport(bins: ["jq"]),
|
||||
searchPaths: [fixture.root.path])
|
||||
|
||||
#expect(trust.names == ["jq"])
|
||||
#expect(trust.pathsByName["jq"] == [fixture.path])
|
||||
}
|
||||
|
||||
@Test func `skill auto allow accepts trusted resolved skill bin path`() throws {
|
||||
let fixture = try Self.makeExecutable(named: "jq")
|
||||
defer { try? FileManager.default.removeItem(at: fixture.root) }
|
||||
|
||||
let trust = SkillBinsCache._testBuildTrustIndex(
|
||||
report: Self.makeReport(bins: ["jq"]),
|
||||
searchPaths: [fixture.root.path])
|
||||
let resolution = ExecCommandResolution(
|
||||
rawExecutable: "jq",
|
||||
resolvedPath: fixture.path,
|
||||
executableName: "jq",
|
||||
cwd: nil)
|
||||
|
||||
#expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
|
||||
}
|
||||
|
||||
@Test func `skill auto allow rejects same basename at different path`() throws {
|
||||
let trusted = try Self.makeExecutable(named: "jq")
|
||||
let untrusted = try Self.makeExecutable(named: "jq")
|
||||
defer {
|
||||
try? FileManager.default.removeItem(at: trusted.root)
|
||||
try? FileManager.default.removeItem(at: untrusted.root)
|
||||
}
|
||||
|
||||
let trust = SkillBinsCache._testBuildTrustIndex(
|
||||
report: Self.makeReport(bins: ["jq"]),
|
||||
searchPaths: [trusted.root.path])
|
||||
let resolution = ExecCommandResolution(
|
||||
rawExecutable: "jq",
|
||||
resolvedPath: untrusted.path,
|
||||
executableName: "jq",
|
||||
cwd: nil)
|
||||
|
||||
#expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
|
||||
}
|
||||
|
||||
private static func makeExecutable(named name: String) throws -> (root: URL, path: String) {
|
||||
let root = FileManager.default.temporaryDirectory
|
||||
.appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true)
|
||||
try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true)
|
||||
let file = root.appendingPathComponent(name)
|
||||
try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8)
|
||||
try FileManager.default.setAttributes(
|
||||
[.posixPermissions: NSNumber(value: Int16(0o755))],
|
||||
ofItemAtPath: file.path)
|
||||
return (root, file.path)
|
||||
}
|
||||
|
||||
private static func makeReport(bins: [String]) -> SkillsStatusReport {
|
||||
SkillsStatusReport(
|
||||
workspaceDir: "/tmp/workspace",
|
||||
managedSkillsDir: "/tmp/skills",
|
||||
skills: [
|
||||
SkillStatus(
|
||||
name: "test-skill",
|
||||
description: "test",
|
||||
source: "local",
|
||||
filePath: "/tmp/skills/test-skill/SKILL.md",
|
||||
baseDir: "/tmp/skills/test-skill",
|
||||
skillKey: "test-skill",
|
||||
primaryEnv: nil,
|
||||
emoji: nil,
|
||||
homepage: nil,
|
||||
always: false,
|
||||
disabled: false,
|
||||
eligible: true,
|
||||
requirements: SkillRequirements(bins: bins, env: [], config: []),
|
||||
missing: SkillMissing(bins: [], env: [], config: []),
|
||||
configChecks: [],
|
||||
install: [])
|
||||
])
|
||||
}
|
||||
}
|
||||
@ -73,7 +73,7 @@ await web_search({
|
||||
## Notes
|
||||
|
||||
- OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits.
|
||||
- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
|
||||
- Each Brave plan includes **\$5/month in free credit** (renewing). The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
|
||||
- The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service).
|
||||
- Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`).
|
||||
|
||||
|
||||
36
docs/ci.md
36
docs/ci.md
@ -9,32 +9,32 @@ read_when:
|
||||
|
||||
# CI Pipeline
|
||||
|
||||
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed.
|
||||
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed.
|
||||
|
||||
## Job Overview
|
||||
|
||||
| Job | Purpose | When it runs |
|
||||
| ----------------- | ------------------------------------------------------- | ------------------------------------------------- |
|
||||
| `docs-scope` | Detect docs-only changes | Always |
|
||||
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs |
|
||||
| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes |
|
||||
| `check-docs` | Markdown lint + broken link check | Docs changed |
|
||||
| `code-analysis` | LOC threshold check (1000 lines) | PRs only |
|
||||
| `secrets` | Detect leaked secrets | Always |
|
||||
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes |
|
||||
| `release-check` | Validate npm pack contents | After build |
|
||||
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes |
|
||||
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
|
||||
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
|
||||
| `android` | Gradle build + tests | Non-docs, android changes |
|
||||
| Job | Purpose | When it runs |
|
||||
| ----------------- | ------------------------------------------------------- | ---------------------------------- |
|
||||
| `docs-scope` | Detect docs-only changes | Always |
|
||||
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes |
|
||||
| `check` | TypeScript types, lint, format | Non-docs, node changes |
|
||||
| `check-docs` | Markdown lint + broken link check | Docs changed |
|
||||
| `secrets` | Detect leaked secrets | Always |
|
||||
| `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes |
|
||||
| `release-check` | Validate npm pack contents | Pushes to `main` after build |
|
||||
| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes |
|
||||
| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes |
|
||||
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
|
||||
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
|
||||
| `android` | Gradle build + tests | Non-docs, android changes |
|
||||
|
||||
## Fail-Fast Order
|
||||
|
||||
Jobs are ordered so cheap checks fail before expensive ones run:
|
||||
|
||||
1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min)
|
||||
2. `build-artifacts` (blocked on above)
|
||||
3. `checks`, `checks-windows`, `macos`, `android` (blocked on build)
|
||||
1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first)
|
||||
2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
|
||||
3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22`
|
||||
|
||||
Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`.
|
||||
|
||||
|
||||
@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first.
|
||||
Under the hood, requests are executed as a normal Gateway agent run (same codepath as
|
||||
`openclaw agent`), so routing/permissions/config match your Gateway.
|
||||
|
||||
## Authentication
|
||||
## Authentication, security, and routing
|
||||
|
||||
Uses the Gateway auth configuration. Send a bearer token:
|
||||
Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api):
|
||||
|
||||
- `Authorization: Bearer <token>`
|
||||
- use `Authorization: Bearer <token>` with the normal Gateway auth config
|
||||
- treat the endpoint as full operator access for the gateway instance
|
||||
- select agents with `model: "openclaw:<agentId>"`, `model: "agent:<agentId>"`, or `x-openclaw-agent-id`
|
||||
- use `x-openclaw-session-key` for explicit session routing
|
||||
|
||||
Notes:
|
||||
|
||||
- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`).
|
||||
- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`).
|
||||
- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`.
|
||||
|
||||
## Security boundary (important)
|
||||
|
||||
Treat this endpoint as a **full operator-access** surface for the gateway instance.
|
||||
|
||||
- HTTP bearer auth here is not a narrow per-user scope model.
|
||||
- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential.
|
||||
- Requests run through the same control-plane agent path as trusted operator actions.
|
||||
- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway.
|
||||
- If the target agent policy allows sensitive tools, this endpoint can use them.
|
||||
- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet.
|
||||
|
||||
See [Security](/gateway/security) and [Remote access](/gateway/remote).
|
||||
|
||||
## Choosing an agent
|
||||
|
||||
No custom headers required: encode the agent id in the OpenResponses `model` field:
|
||||
|
||||
- `model: "openclaw:<agentId>"` (example: `"openclaw:main"`, `"openclaw:beta"`)
|
||||
- `model: "agent:<agentId>"` (alias)
|
||||
|
||||
Or target a specific OpenClaw agent by header:
|
||||
|
||||
- `x-openclaw-agent-id: <agentId>` (default: `main`)
|
||||
|
||||
Advanced:
|
||||
|
||||
- `x-openclaw-session-key: <sessionKey>` to fully control session routing.
|
||||
|
||||
## Enabling the endpoint
|
||||
|
||||
Set `gateway.http.endpoints.responses.enabled` to `true`:
|
||||
|
||||
```json5
|
||||
{
|
||||
gateway: {
|
||||
http: {
|
||||
endpoints: {
|
||||
responses: { enabled: true },
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
## Disabling the endpoint
|
||||
|
||||
Set `gateway.http.endpoints.responses.enabled` to `false`:
|
||||
|
||||
```json5
|
||||
{
|
||||
gateway: {
|
||||
http: {
|
||||
endpoints: {
|
||||
responses: { enabled: false },
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`.
|
||||
|
||||
## Session behavior
|
||||
|
||||
|
||||
@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost):
|
||||
- No real keys required
|
||||
- Should be fast and stable
|
||||
- Pool note:
|
||||
- OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards.
|
||||
- On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`).
|
||||
- OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards.
|
||||
- On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there.
|
||||
- Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`).
|
||||
|
||||
### E2E (gateway smoke)
|
||||
|
||||
138
docs/install/docker-vm-runtime.md
Normal file
138
docs/install/docker-vm-runtime.md
Normal file
@ -0,0 +1,138 @@
|
||||
---
|
||||
summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts"
|
||||
read_when:
|
||||
- You are deploying OpenClaw on a cloud VM with Docker
|
||||
- You need the shared binary bake, persistence, and update flow
|
||||
title: "Docker VM Runtime"
|
||||
---
|
||||
|
||||
# Docker VM Runtime
|
||||
|
||||
Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers.
|
||||
|
||||
## Bake required binaries into the image
|
||||
|
||||
Installing binaries inside a running container is a trap.
|
||||
Anything installed at runtime will be lost on restart.
|
||||
|
||||
All external binaries required by skills must be installed at image build time.
|
||||
|
||||
The examples below show three common binaries only:
|
||||
|
||||
- `gog` for Gmail access
|
||||
- `goplaces` for Google Places
|
||||
- `wacli` for WhatsApp
|
||||
|
||||
These are examples, not a complete list.
|
||||
You may install as many binaries as needed using the same pattern.
|
||||
|
||||
If you add new skills later that depend on additional binaries, you must:
|
||||
|
||||
1. Update the Dockerfile
|
||||
2. Rebuild the image
|
||||
3. Restart the containers
|
||||
|
||||
**Example Dockerfile**
|
||||
|
||||
```dockerfile
|
||||
FROM node:24-bookworm
|
||||
|
||||
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Example binary 1: Gmail CLI
|
||||
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
|
||||
|
||||
# Example binary 2: Google Places CLI
|
||||
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
|
||||
|
||||
# Example binary 3: WhatsApp CLI
|
||||
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
|
||||
|
||||
# Add more binaries below using the same pattern
|
||||
|
||||
WORKDIR /app
|
||||
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
|
||||
COPY ui/package.json ./ui/package.json
|
||||
COPY scripts ./scripts
|
||||
|
||||
RUN corepack enable
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
COPY . .
|
||||
RUN pnpm build
|
||||
RUN pnpm ui:install
|
||||
RUN pnpm ui:build
|
||||
|
||||
ENV NODE_ENV=production
|
||||
|
||||
CMD ["node","dist/index.js"]
|
||||
```
|
||||
|
||||
## Build and launch
|
||||
|
||||
```bash
|
||||
docker compose build
|
||||
docker compose up -d openclaw-gateway
|
||||
```
|
||||
|
||||
If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory.
|
||||
Use a larger machine class before retrying.
|
||||
|
||||
Verify binaries:
|
||||
|
||||
```bash
|
||||
docker compose exec openclaw-gateway which gog
|
||||
docker compose exec openclaw-gateway which goplaces
|
||||
docker compose exec openclaw-gateway which wacli
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
/usr/local/bin/gog
|
||||
/usr/local/bin/goplaces
|
||||
/usr/local/bin/wacli
|
||||
```
|
||||
|
||||
Verify Gateway:
|
||||
|
||||
```bash
|
||||
docker compose logs -f openclaw-gateway
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
[gateway] listening on ws://0.0.0.0:18789
|
||||
```
|
||||
|
||||
## What persists where
|
||||
|
||||
OpenClaw runs in Docker, but Docker is not the source of truth.
|
||||
All long-lived state must survive restarts, rebuilds, and reboots.
|
||||
|
||||
| Component           | Location                          | Persistence mechanism  | Notes                            |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config      | `/home/node/.openclaw/`           | Host volume mount      | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/`           | Host volume mount      | OAuth tokens, API keys           |
| Skill configs       | `/home/node/.openclaw/skills/`    | Host volume mount      | Skill-level state                |
| Agent workspace     | `/home/node/.openclaw/workspace/` | Host volume mount      | Code and agent artifacts         |
| WhatsApp session    | `/home/node/.openclaw/`           | Host volume mount      | Preserves QR login               |
| Gmail keyring       | `/home/node/.openclaw/`           | Host volume + password | Requires `GOG_KEYRING_PASSWORD`  |
| External binaries   | `/usr/local/bin/`                 | Docker image           | Must be baked at build time      |
| Node runtime        | Container filesystem              | Docker image           | Rebuilt every image build        |
| OS packages         | Container filesystem              | Docker image           | Do not install at runtime        |
| Docker container    | Ephemeral                         | Restartable            | Safe to destroy                  |
|
||||
## Updates
|
||||
|
||||
To update OpenClaw on the VM:
|
||||
|
||||
```bash
|
||||
git pull
|
||||
docker compose build
|
||||
docker compose up -d
|
||||
```
|
||||
@ -281,77 +281,20 @@ services:
|
||||
|
||||
---
|
||||
|
||||
## 10) Bake required binaries into the image (critical)
|
||||
## 10) Shared Docker VM runtime steps
|
||||
|
||||
Installing binaries inside a running container is a trap.
|
||||
Anything installed at runtime will be lost on restart.
|
||||
Use the shared runtime guide for the common Docker host flow:
|
||||
|
||||
All external binaries required by skills must be installed at image build time.
|
||||
|
||||
The examples below show three common binaries only:
|
||||
|
||||
- `gog` for Gmail access
|
||||
- `goplaces` for Google Places
|
||||
- `wacli` for WhatsApp
|
||||
|
||||
These are examples, not a complete list.
|
||||
You may install as many binaries as needed using the same pattern.
|
||||
|
||||
If you add new skills later that depend on additional binaries, you must:
|
||||
|
||||
1. Update the Dockerfile
|
||||
2. Rebuild the image
|
||||
3. Restart the containers
|
||||
|
||||
**Example Dockerfile**
|
||||
|
||||
```dockerfile
|
||||
FROM node:24-bookworm
|
||||
|
||||
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Example binary 1: Gmail CLI
|
||||
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
|
||||
|
||||
# Example binary 2: Google Places CLI
|
||||
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
|
||||
|
||||
# Example binary 3: WhatsApp CLI
|
||||
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
|
||||
|
||||
# Add more binaries below using the same pattern
|
||||
|
||||
WORKDIR /app
|
||||
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
|
||||
COPY ui/package.json ./ui/package.json
|
||||
COPY scripts ./scripts
|
||||
|
||||
RUN corepack enable
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
COPY . .
|
||||
RUN pnpm build
|
||||
RUN pnpm ui:install
|
||||
RUN pnpm ui:build
|
||||
|
||||
ENV NODE_ENV=production
|
||||
|
||||
CMD ["node","dist/index.js"]
|
||||
```
|
||||
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
|
||||
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
|
||||
- [What persists where](/install/docker-vm-runtime#what-persists-where)
|
||||
- [Updates](/install/docker-vm-runtime#updates)
|
||||
|
||||
---
|
||||
|
||||
## 11) Build and launch
|
||||
## 11) GCP-specific launch notes
|
||||
|
||||
```bash
|
||||
docker compose build
|
||||
docker compose up -d openclaw-gateway
|
||||
```
|
||||
|
||||
If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
|
||||
On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
|
||||
|
||||
When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing:
|
||||
|
||||
@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins
|
||||
|
||||
If you changed the gateway port, replace `18789` with your configured port.
|
||||
|
||||
Verify binaries:
|
||||
|
||||
```bash
|
||||
docker compose exec openclaw-gateway which gog
|
||||
docker compose exec openclaw-gateway which goplaces
|
||||
docker compose exec openclaw-gateway which wacli
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
/usr/local/bin/gog
|
||||
/usr/local/bin/goplaces
|
||||
/usr/local/bin/wacli
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 12) Verify Gateway
|
||||
|
||||
```bash
|
||||
docker compose logs -f openclaw-gateway
|
||||
```
|
||||
|
||||
Success:
|
||||
|
||||
```
|
||||
[gateway] listening on ws://0.0.0.0:18789
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 13) Access from your laptop
|
||||
## 12) Access from your laptop
|
||||
|
||||
Create an SSH tunnel to forward the Gateway port:
|
||||
|
||||
@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list
|
||||
docker compose run --rm openclaw-cli devices approve <requestId>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## What persists where (source of truth)
|
||||
|
||||
OpenClaw runs in Docker, but Docker is not the source of truth.
|
||||
All long-lived state must survive restarts, rebuilds, and reboots.
|
||||
|
||||
| Component | Location | Persistence mechanism | Notes |
|
||||
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
|
||||
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
|
||||
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
|
||||
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
|
||||
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
|
||||
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
|
||||
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
|
||||
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
|
||||
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
|
||||
| OS packages | Container filesystem | Docker image | Do not install at runtime |
|
||||
| Docker container | Ephemeral | Restartable | Safe to destroy |
|
||||
|
||||
---
|
||||
|
||||
## Updates
|
||||
|
||||
To update OpenClaw on the VM:
|
||||
|
||||
```bash
|
||||
cd ~/openclaw
|
||||
git pull
|
||||
docker compose build
|
||||
docker compose up -d
|
||||
```
|
||||
Need the shared persistence and update reference again?
|
||||
See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates).
|
||||
|
||||
---
|
||||
|
||||
|
||||
@ -202,107 +202,20 @@ services:
|
||||
|
||||
---
|
||||
|
||||
## 7) Bake required binaries into the image (critical)
|
||||
## 7) Shared Docker VM runtime steps
|
||||
|
||||
Installing binaries inside a running container is a trap.
|
||||
Anything installed at runtime will be lost on restart.
|
||||
Use the shared runtime guide for the common Docker host flow:
|
||||
|
||||
All external binaries required by skills must be installed at image build time.
|
||||
|
||||
The examples below show three common binaries only:
|
||||
|
||||
- `gog` for Gmail access
|
||||
- `goplaces` for Google Places
|
||||
- `wacli` for WhatsApp
|
||||
|
||||
These are examples, not a complete list.
|
||||
You may install as many binaries as needed using the same pattern.
|
||||
|
||||
If you add new skills later that depend on additional binaries, you must:
|
||||
|
||||
1. Update the Dockerfile
|
||||
2. Rebuild the image
|
||||
3. Restart the containers
|
||||
|
||||
**Example Dockerfile**
|
||||
|
||||
```dockerfile
|
||||
FROM node:24-bookworm
|
||||
|
||||
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Example binary 1: Gmail CLI
|
||||
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
|
||||
|
||||
# Example binary 2: Google Places CLI
|
||||
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
|
||||
|
||||
# Example binary 3: WhatsApp CLI
|
||||
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
|
||||
|
||||
# Add more binaries below using the same pattern
|
||||
|
||||
WORKDIR /app
|
||||
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
|
||||
COPY ui/package.json ./ui/package.json
|
||||
COPY scripts ./scripts
|
||||
|
||||
RUN corepack enable
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
COPY . .
|
||||
RUN pnpm build
|
||||
RUN pnpm ui:install
|
||||
RUN pnpm ui:build
|
||||
|
||||
ENV NODE_ENV=production
|
||||
|
||||
CMD ["node","dist/index.js"]
|
||||
```
|
||||
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
|
||||
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
|
||||
- [What persists where](/install/docker-vm-runtime#what-persists-where)
|
||||
- [Updates](/install/docker-vm-runtime#updates)
|
||||
|
||||
---
|
||||
|
||||
## 8) Build and launch
|
||||
## 8) Hetzner-specific access
|
||||
|
||||
```bash
|
||||
docker compose build
|
||||
docker compose up -d openclaw-gateway
|
||||
```
|
||||
|
||||
Verify binaries:
|
||||
|
||||
```bash
|
||||
docker compose exec openclaw-gateway which gog
|
||||
docker compose exec openclaw-gateway which goplaces
|
||||
docker compose exec openclaw-gateway which wacli
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
/usr/local/bin/gog
|
||||
/usr/local/bin/goplaces
|
||||
/usr/local/bin/wacli
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 9) Verify Gateway
|
||||
|
||||
```bash
|
||||
docker compose logs -f openclaw-gateway
|
||||
```
|
||||
|
||||
Success:
|
||||
|
||||
```
|
||||
[gateway] listening on ws://0.0.0.0:18789
|
||||
```
|
||||
|
||||
From your laptop:
|
||||
After the shared build and launch steps, tunnel from your laptop:
|
||||
|
||||
```bash
|
||||
ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP
|
||||
@ -316,25 +229,7 @@ Paste your gateway token.
|
||||
|
||||
---
|
||||
|
||||
## What persists where (source of truth)
|
||||
|
||||
OpenClaw runs in Docker, but Docker is not the source of truth.
|
||||
All long-lived state must survive restarts, rebuilds, and reboots.
|
||||
|
||||
| Component | Location | Persistence mechanism | Notes |
|
||||
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
|
||||
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
|
||||
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
|
||||
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
|
||||
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
|
||||
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
|
||||
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
|
||||
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
|
||||
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
|
||||
| OS packages | Container filesystem | Docker image | Do not install at runtime |
|
||||
| Docker container | Ephemeral | Restartable | Safe to destroy |
|
||||
|
||||
---
|
||||
The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where).
|
||||
|
||||
## Infrastructure as Code (Terraform)
|
||||
|
||||
|
||||
@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set:
|
||||
}
|
||||
```
|
||||
|
||||
`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin
|
||||
normalizes the provider-supplied `From` value and compares it to `allowFrom`.
|
||||
Webhook verification authenticates provider delivery and payload integrity, but
|
||||
it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as
|
||||
caller-ID filtering, not strong caller identity.
|
||||
|
||||
Auto-responses use the agent system. Tune with:
|
||||
|
||||
- `responseModel`
|
||||
|
||||
@ -85,8 +85,8 @@ See [Memory](/concepts/memory).
|
||||
- **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey`
|
||||
- **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey`
|
||||
|
||||
**Brave Search free credit:** Each Brave plan includes $5/month in renewing
|
||||
free credit. The Search plan costs $5 per 1,000 requests, so the credit covers
|
||||
**Brave Search free credit:** Each Brave plan includes \$5/month in renewing
|
||||
free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers
|
||||
1,000 requests/month at no charge. Set your usage limit in the Brave dashboard
|
||||
to avoid unexpected charges.
|
||||
|
||||
|
||||
@ -11,7 +11,7 @@ title: "Tests"
|
||||
|
||||
- `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests don’t collide with a running instance. Use this when a prior gateway run left port 18789 occupied.
|
||||
- `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic.
|
||||
- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
|
||||
- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
|
||||
- `pnpm test`: runs the fast core unit lane by default for quick local feedback.
|
||||
- `pnpm test:channels`: runs channel-heavy suites.
|
||||
- `pnpm test:extensions`: runs extension/plugin suites.
|
||||
|
||||
@ -167,93 +167,8 @@ openclaw onboard --non-interactive \
|
||||
`--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts.
|
||||
</Note>
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="Gemini example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice gemini-api-key \
|
||||
--gemini-api-key "$GEMINI_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Z.AI example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice zai-api-key \
|
||||
--zai-api-key "$ZAI_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Vercel AI Gateway example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice ai-gateway-api-key \
|
||||
--ai-gateway-api-key "$AI_GATEWAY_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Cloudflare AI Gateway example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice cloudflare-ai-gateway-api-key \
|
||||
--cloudflare-ai-gateway-account-id "your-account-id" \
|
||||
--cloudflare-ai-gateway-gateway-id "your-gateway-id" \
|
||||
--cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Moonshot example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice moonshot-api-key \
|
||||
--moonshot-api-key "$MOONSHOT_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Synthetic example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice synthetic-api-key \
|
||||
--synthetic-api-key "$SYNTHETIC_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="OpenCode example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice opencode-zen \
|
||||
--opencode-zen-api-key "$OPENCODE_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog.
|
||||
</Accordion>
|
||||
<Accordion title="Ollama example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice ollama \
|
||||
--custom-model-id "qwen3.5:27b" \
|
||||
--accept-risk \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance.
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples).
|
||||
Use this reference page for flag semantics and step ordering.
|
||||
|
||||
### Add agent (non-interactive)
|
||||
|
||||
|
||||
@ -48,6 +48,8 @@ Gateway.
|
||||
- `openclaw`: managed, isolated browser (no extension required).
|
||||
- `chrome`: extension relay to your **system browser** (requires the OpenClaw
|
||||
extension to be attached to a tab).
|
||||
- `existing-session`: official Chrome MCP attach flow for a running Chrome
|
||||
profile.
|
||||
|
||||
Set `browser.defaultProfile: "openclaw"` if you want managed mode by default.
|
||||
|
||||
@ -77,6 +79,12 @@ Browser settings live in `~/.openclaw/openclaw.json`.
|
||||
profiles: {
|
||||
openclaw: { cdpPort: 18800, color: "#FF4500" },
|
||||
work: { cdpPort: 18801, color: "#0066CC" },
|
||||
chromeLive: {
|
||||
cdpPort: 18802,
|
||||
driver: "existing-session",
|
||||
attachOnly: true,
|
||||
color: "#00AA00",
|
||||
},
|
||||
remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" },
|
||||
},
|
||||
},
|
||||
@ -100,6 +108,8 @@ Notes:
|
||||
- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay.
|
||||
- Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary.
|
||||
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP.
|
||||
- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do
|
||||
not set `cdpUrl` for that driver.
|
||||
|
||||
## Use Brave (or another Chromium-based browser)
|
||||
|
||||
@ -264,11 +274,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be:
|
||||
- **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port
|
||||
- **remote**: an explicit CDP URL (Chromium-based browser running elsewhere)
|
||||
- **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension
|
||||
- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect
|
||||
|
||||
Defaults:
|
||||
|
||||
- The `openclaw` profile is auto-created if missing.
|
||||
- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default).
|
||||
- Existing-session profiles are opt-in; create them with `--driver existing-session`.
|
||||
- Local CDP ports allocate from **18800–18899** by default.
|
||||
- Deleting a profile moves its local data directory to Trash.
|
||||
|
||||
@ -328,6 +340,66 @@ Notes:
|
||||
|
||||
- This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions).
|
||||
- Detach by clicking the extension icon again.
|
||||
|
||||
## Chrome existing-session via MCP
|
||||
|
||||
OpenClaw can also attach to a running Chrome profile through the official
|
||||
Chrome DevTools MCP server. This reuses the tabs and login state already open in
|
||||
that Chrome profile.
|
||||
|
||||
Official background and setup references:
|
||||
|
||||
- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
|
||||
- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp)
|
||||
|
||||
Create a profile:
|
||||
|
||||
```bash
|
||||
openclaw browser create-profile \
|
||||
--name chrome-live \
|
||||
--driver existing-session \
|
||||
--color "#00AA00"
|
||||
```
|
||||
|
||||
Then in Chrome:
|
||||
|
||||
1. Open `chrome://inspect/#remote-debugging`
|
||||
2. Enable remote debugging
|
||||
3. Keep Chrome running and approve the connection prompt when OpenClaw attaches
|
||||
|
||||
Live attach smoke test:
|
||||
|
||||
```bash
|
||||
openclaw browser --browser-profile chrome-live start
|
||||
openclaw browser --browser-profile chrome-live status
|
||||
openclaw browser --browser-profile chrome-live tabs
|
||||
openclaw browser --browser-profile chrome-live snapshot --format ai
|
||||
```
|
||||
|
||||
What success looks like:
|
||||
|
||||
- `status` shows `driver: existing-session`
|
||||
- `status` shows `running: true`
|
||||
- `tabs` lists your already-open Chrome tabs
|
||||
- `snapshot` returns refs from the selected live tab
|
||||
|
||||
What to check if attach does not work:
|
||||
|
||||
- Chrome is version `144+`
|
||||
- remote debugging is enabled at `chrome://inspect/#remote-debugging`
|
||||
- Chrome showed and you accepted the attach consent prompt
|
||||
- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect`
|
||||
|
||||
Notes:
|
||||
|
||||
- This path is higher-risk than the isolated `openclaw` profile because it can
|
||||
act inside your signed-in browser session.
|
||||
- OpenClaw does not launch Chrome for this driver; it attaches to an existing
|
||||
session only.
|
||||
- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not
|
||||
the legacy default-profile remote debugging port workflow.
|
||||
- Some features still require the extension relay or managed browser path, such
|
||||
as PDF export and download interception.
|
||||
- Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated.
|
||||
|
||||
WSL2 / cross-namespace example:
|
||||
|
||||
@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs
|
||||
|
||||
Attach/detach happens via a **single Chrome toolbar button**.
|
||||
|
||||
If you want Chrome’s official DevTools MCP attach flow instead of the OpenClaw
|
||||
extension relay, use an `existing-session` browser profile instead. See
|
||||
[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome’s own
|
||||
setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your
|
||||
browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
|
||||
and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp).
|
||||
|
||||
## What it is (concept)
|
||||
|
||||
There are three parts:
|
||||
|
||||
@ -65,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi
|
||||
2. In the dashboard, choose the **Search** plan and generate an API key.
|
||||
3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment.
|
||||
|
||||
Each Brave plan includes **$5/month in free credit** (renewing). The Search
|
||||
plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
|
||||
Each Brave plan includes **\$5/month in free credit** (renewing). The Search
|
||||
plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
|
||||
your usage limit in the Brave dashboard to avoid unexpected charges. See the
|
||||
[Brave API portal](https://brave.com/search/api/) for current plans and
|
||||
pricing.
|
||||
|
||||
@ -54,6 +54,49 @@ describe("acpx ensure", () => {
|
||||
}
|
||||
});
|
||||
|
||||
function mockEnsureInstallFlow() {
|
||||
spawnAndCollectMock
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "acpx 0.0.9\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "added 1 package\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
});
|
||||
}
|
||||
|
||||
function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) {
|
||||
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
|
||||
command: "npm",
|
||||
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars,
|
||||
});
|
||||
}
|
||||
|
||||
it("accepts the pinned acpx version", async () => {
|
||||
spawnAndCollectMock.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
@ -177,25 +220,7 @@ describe("acpx ensure", () => {
|
||||
});
|
||||
|
||||
it("installs and verifies pinned acpx when precheck fails", async () => {
|
||||
spawnAndCollectMock
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "acpx 0.0.9\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "added 1 package\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
});
|
||||
mockEnsureInstallFlow();
|
||||
|
||||
await ensureAcpx({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
@ -204,33 +229,11 @@ describe("acpx ensure", () => {
|
||||
});
|
||||
|
||||
expect(spawnAndCollectMock).toHaveBeenCalledTimes(3);
|
||||
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
|
||||
command: "npm",
|
||||
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
|
||||
cwd: "/plugin",
|
||||
});
|
||||
expectEnsureInstallCalls();
|
||||
});
|
||||
|
||||
it("threads stripProviderAuthEnvVars through version probes and install", async () => {
|
||||
spawnAndCollectMock
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "acpx 0.0.9\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "added 1 package\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
});
|
||||
mockEnsureInstallFlow();
|
||||
|
||||
await ensureAcpx({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
@ -239,24 +242,7 @@ describe("acpx ensure", () => {
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
|
||||
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
|
||||
command: "npm",
|
||||
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
expectEnsureInstallCalls(true);
|
||||
});
|
||||
|
||||
it("fails with actionable error when npm install fails", async () => {
|
||||
|
||||
@ -22,6 +22,45 @@ export type DownloadMessageResourceResult = {
|
||||
fileName?: string;
|
||||
};
|
||||
|
||||
function createConfiguredFeishuMediaClient(params: { cfg: ClawdbotConfig; accountId?: string }): {
|
||||
account: ReturnType<typeof resolveFeishuAccount>;
|
||||
client: ReturnType<typeof createFeishuClient>;
|
||||
} {
|
||||
const account = resolveFeishuAccount({ cfg: params.cfg, accountId: params.accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
return {
|
||||
account,
|
||||
client: createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
function extractFeishuUploadKey(
|
||||
response: unknown,
|
||||
params: {
|
||||
key: "image_key" | "file_key";
|
||||
errorPrefix: string;
|
||||
},
|
||||
): string {
|
||||
// SDK v1.30+ returns data directly without code wrapper on success.
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type
|
||||
const responseAny = response as any;
|
||||
if (responseAny.code !== undefined && responseAny.code !== 0) {
|
||||
throw new Error(`${params.errorPrefix}: ${responseAny.msg || `code ${responseAny.code}`}`);
|
||||
}
|
||||
|
||||
const key = responseAny[params.key] ?? responseAny.data?.[params.key];
|
||||
if (!key) {
|
||||
throw new Error(`${params.errorPrefix}: no ${params.key} returned`);
|
||||
}
|
||||
return key;
|
||||
}
|
||||
|
||||
async function readFeishuResponseBuffer(params: {
|
||||
response: unknown;
|
||||
tmpDirPrefix: string;
|
||||
@ -94,15 +133,7 @@ export async function downloadImageFeishu(params: {
|
||||
if (!normalizedImageKey) {
|
||||
throw new Error("Feishu image download failed: invalid image_key");
|
||||
}
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
|
||||
|
||||
const response = await client.im.image.get({
|
||||
path: { image_key: normalizedImageKey },
|
||||
@ -132,15 +163,7 @@ export async function downloadMessageResourceFeishu(params: {
|
||||
if (!normalizedFileKey) {
|
||||
throw new Error("Feishu message resource download failed: invalid file_key");
|
||||
}
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
|
||||
|
||||
const response = await client.im.messageResource.get({
|
||||
path: { message_id: messageId, file_key: normalizedFileKey },
|
||||
@ -179,15 +202,7 @@ export async function uploadImageFeishu(params: {
|
||||
accountId?: string;
|
||||
}): Promise<UploadImageResult> {
|
||||
const { cfg, image, imageType = "message", accountId } = params;
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
|
||||
|
||||
// SDK accepts Buffer directly or fs.ReadStream for file paths
|
||||
// Using Readable.from(buffer) causes issues with form-data library
|
||||
@ -202,20 +217,12 @@ export async function uploadImageFeishu(params: {
|
||||
},
|
||||
});
|
||||
|
||||
// SDK v1.30+ returns data directly without code wrapper on success
|
||||
// On error, it throws or returns { code, msg }
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type
|
||||
const responseAny = response as any;
|
||||
if (responseAny.code !== undefined && responseAny.code !== 0) {
|
||||
throw new Error(`Feishu image upload failed: ${responseAny.msg || `code ${responseAny.code}`}`);
|
||||
}
|
||||
|
||||
const imageKey = responseAny.image_key ?? responseAny.data?.image_key;
|
||||
if (!imageKey) {
|
||||
throw new Error("Feishu image upload failed: no image_key returned");
|
||||
}
|
||||
|
||||
return { imageKey };
|
||||
return {
|
||||
imageKey: extractFeishuUploadKey(response, {
|
||||
key: "image_key",
|
||||
errorPrefix: "Feishu image upload failed",
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
@ -249,15 +256,7 @@ export async function uploadFileFeishu(params: {
|
||||
accountId?: string;
|
||||
}): Promise<UploadFileResult> {
|
||||
const { cfg, file, fileName, fileType, duration, accountId } = params;
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
const { client } = createConfiguredFeishuMediaClient({ cfg, accountId });
|
||||
|
||||
// SDK accepts Buffer directly or fs.ReadStream for file paths
|
||||
// Using Readable.from(buffer) causes issues with form-data library
|
||||
@ -276,19 +275,12 @@ export async function uploadFileFeishu(params: {
|
||||
},
|
||||
});
|
||||
|
||||
// SDK v1.30+ returns data directly without code wrapper on success
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type
|
||||
const responseAny = response as any;
|
||||
if (responseAny.code !== undefined && responseAny.code !== 0) {
|
||||
throw new Error(`Feishu file upload failed: ${responseAny.msg || `code ${responseAny.code}`}`);
|
||||
}
|
||||
|
||||
const fileKey = responseAny.file_key ?? responseAny.data?.file_key;
|
||||
if (!fileKey) {
|
||||
throw new Error("Feishu file upload failed: no file_key returned");
|
||||
}
|
||||
|
||||
return { fileKey };
|
||||
return {
|
||||
fileKey: extractFeishuUploadKey(response, {
|
||||
key: "file_key",
|
||||
errorPrefix: "Feishu file upload failed",
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -1,9 +1,7 @@
|
||||
import crypto from "node:crypto";
|
||||
import { createServer } from "node:http";
|
||||
import type { AddressInfo } from "node:net";
|
||||
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js";
|
||||
import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js";
|
||||
|
||||
const probeFeishuMock = vi.hoisted(() => vi.fn());
|
||||
|
||||
@ -23,61 +21,6 @@ vi.mock("./runtime.js", () => createFeishuRuntimeMockModule());
|
||||
|
||||
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
|
||||
|
||||
async function getFreePort(): Promise<number> {
|
||||
const server = createServer();
|
||||
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
|
||||
const address = server.address() as AddressInfo | null;
|
||||
if (!address) {
|
||||
throw new Error("missing server address");
|
||||
}
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
return address.port;
|
||||
}
|
||||
|
||||
async function waitUntilServerReady(url: string): Promise<void> {
|
||||
for (let i = 0; i < 50; i += 1) {
|
||||
try {
|
||||
const response = await fetch(url, { method: "GET" });
|
||||
if (response.status >= 200 && response.status < 500) {
|
||||
return;
|
||||
}
|
||||
} catch {
|
||||
// retry
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 20));
|
||||
}
|
||||
throw new Error(`server did not start: ${url}`);
|
||||
}
|
||||
|
||||
function buildConfig(params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
port: number;
|
||||
verificationToken?: string;
|
||||
encryptKey?: string;
|
||||
}): ClawdbotConfig {
|
||||
return {
|
||||
channels: {
|
||||
feishu: {
|
||||
enabled: true,
|
||||
accounts: {
|
||||
[params.accountId]: {
|
||||
enabled: true,
|
||||
appId: "cli_test",
|
||||
appSecret: "secret_test", // pragma: allowlist secret
|
||||
connectionMode: "webhook",
|
||||
webhookHost: "127.0.0.1",
|
||||
webhookPort: params.port,
|
||||
webhookPath: params.path,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
}
|
||||
|
||||
function signFeishuPayload(params: {
|
||||
encryptKey: string;
|
||||
payload: Record<string, unknown>;
|
||||
@ -107,43 +50,6 @@ function encryptFeishuPayload(encryptKey: string, payload: Record<string, unknow
|
||||
return Buffer.concat([iv, encrypted]).toString("base64");
|
||||
}
|
||||
|
||||
async function withRunningWebhookMonitor(
|
||||
params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
verificationToken: string;
|
||||
encryptKey: string;
|
||||
},
|
||||
run: (url: string) => Promise<void>,
|
||||
) {
|
||||
const port = await getFreePort();
|
||||
const cfg = buildConfig({
|
||||
accountId: params.accountId,
|
||||
path: params.path,
|
||||
port,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
});
|
||||
|
||||
const abortController = new AbortController();
|
||||
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
|
||||
const monitorPromise = monitorFeishuProvider({
|
||||
config: cfg,
|
||||
runtime,
|
||||
abortSignal: abortController.signal,
|
||||
});
|
||||
|
||||
const url = `http://127.0.0.1:${port}${params.path}`;
|
||||
await waitUntilServerReady(url);
|
||||
|
||||
try {
|
||||
await run(url);
|
||||
} finally {
|
||||
abortController.abort();
|
||||
await monitorPromise;
|
||||
}
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
stopFeishuMonitor();
|
||||
});
|
||||
@ -159,6 +65,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = { type: "url_verification", challenge: "challenge-token" };
|
||||
const response = await fetch(url, {
|
||||
@ -185,6 +92,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
@ -208,6 +116,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
@ -231,6 +140,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = { type: "url_verification", challenge: "challenge-token" };
|
||||
const response = await fetch(url, {
|
||||
@ -255,6 +165,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = {
|
||||
schema: "2.0",
|
||||
@ -283,6 +194,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = {
|
||||
encrypt: encryptFeishuPayload("encrypt_key", {
|
||||
|
||||
@ -1,11 +1,13 @@
|
||||
import { createServer } from "node:http";
|
||||
import type { AddressInfo } from "node:net";
|
||||
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
createFeishuClientMockModule,
|
||||
createFeishuRuntimeMockModule,
|
||||
} from "./monitor.test-mocks.js";
|
||||
import {
|
||||
buildWebhookConfig,
|
||||
getFreePort,
|
||||
withRunningWebhookMonitor,
|
||||
} from "./monitor.webhook.test-helpers.js";
|
||||
|
||||
const probeFeishuMock = vi.hoisted(() => vi.fn());
|
||||
|
||||
@ -33,98 +35,6 @@ import {
|
||||
stopFeishuMonitor,
|
||||
} from "./monitor.js";
|
||||
|
||||
async function getFreePort(): Promise<number> {
|
||||
const server = createServer();
|
||||
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
|
||||
const address = server.address() as AddressInfo | null;
|
||||
if (!address) {
|
||||
throw new Error("missing server address");
|
||||
}
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
return address.port;
|
||||
}
|
||||
|
||||
async function waitUntilServerReady(url: string): Promise<void> {
|
||||
for (let i = 0; i < 50; i += 1) {
|
||||
try {
|
||||
const response = await fetch(url, { method: "GET" });
|
||||
if (response.status >= 200 && response.status < 500) {
|
||||
return;
|
||||
}
|
||||
} catch {
|
||||
// retry
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 20));
|
||||
}
|
||||
throw new Error(`server did not start: ${url}`);
|
||||
}
|
||||
|
||||
function buildConfig(params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
port: number;
|
||||
verificationToken?: string;
|
||||
encryptKey?: string;
|
||||
}): ClawdbotConfig {
|
||||
return {
|
||||
channels: {
|
||||
feishu: {
|
||||
enabled: true,
|
||||
accounts: {
|
||||
[params.accountId]: {
|
||||
enabled: true,
|
||||
appId: "cli_test",
|
||||
appSecret: "secret_test", // pragma: allowlist secret
|
||||
connectionMode: "webhook",
|
||||
webhookHost: "127.0.0.1",
|
||||
webhookPort: params.port,
|
||||
webhookPath: params.path,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
}
|
||||
|
||||
async function withRunningWebhookMonitor(
|
||||
params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
verificationToken: string;
|
||||
encryptKey: string;
|
||||
},
|
||||
run: (url: string) => Promise<void>,
|
||||
) {
|
||||
const port = await getFreePort();
|
||||
const cfg = buildConfig({
|
||||
accountId: params.accountId,
|
||||
path: params.path,
|
||||
port,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
});
|
||||
|
||||
const abortController = new AbortController();
|
||||
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
|
||||
const monitorPromise = monitorFeishuProvider({
|
||||
config: cfg,
|
||||
runtime,
|
||||
abortSignal: abortController.signal,
|
||||
});
|
||||
|
||||
const url = `http://127.0.0.1:${port}${params.path}`;
|
||||
await waitUntilServerReady(url);
|
||||
|
||||
try {
|
||||
await run(url);
|
||||
} finally {
|
||||
abortController.abort();
|
||||
await monitorPromise;
|
||||
}
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
clearFeishuWebhookRateLimitStateForTest();
|
||||
stopFeishuMonitor();
|
||||
@ -134,7 +44,7 @@ describe("Feishu webhook security hardening", () => {
|
||||
it("rejects webhook mode without verificationToken", async () => {
|
||||
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
|
||||
|
||||
const cfg = buildConfig({
|
||||
const cfg = buildWebhookConfig({
|
||||
accountId: "missing-token",
|
||||
path: "/hook-missing-token",
|
||||
port: await getFreePort(),
|
||||
@ -148,7 +58,7 @@ describe("Feishu webhook security hardening", () => {
|
||||
it("rejects webhook mode without encryptKey", async () => {
|
||||
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
|
||||
|
||||
const cfg = buildConfig({
|
||||
const cfg = buildWebhookConfig({
|
||||
accountId: "missing-encrypt-key",
|
||||
path: "/hook-missing-encrypt",
|
||||
port: await getFreePort(),
|
||||
@ -167,6 +77,7 @@ describe("Feishu webhook security hardening", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
@ -189,6 +100,7 @@ describe("Feishu webhook security hardening", () => {
|
||||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
let saw429 = false;
|
||||
for (let i = 0; i < 130; i += 1) {
|
||||
|
||||
98
extensions/feishu/src/monitor.webhook.test-helpers.ts
Normal file
98
extensions/feishu/src/monitor.webhook.test-helpers.ts
Normal file
@ -0,0 +1,98 @@
|
||||
import { createServer } from "node:http";
|
||||
import type { AddressInfo } from "node:net";
|
||||
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
|
||||
import { vi } from "vitest";
|
||||
import type { monitorFeishuProvider } from "./monitor.js";
|
||||
|
||||
export async function getFreePort(): Promise<number> {
|
||||
const server = createServer();
|
||||
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
|
||||
const address = server.address() as AddressInfo | null;
|
||||
if (!address) {
|
||||
throw new Error("missing server address");
|
||||
}
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
return address.port;
|
||||
}
|
||||
|
||||
async function waitUntilServerReady(url: string): Promise<void> {
|
||||
for (let i = 0; i < 50; i += 1) {
|
||||
try {
|
||||
const response = await fetch(url, { method: "GET" });
|
||||
if (response.status >= 200 && response.status < 500) {
|
||||
return;
|
||||
}
|
||||
} catch {
|
||||
// retry
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 20));
|
||||
}
|
||||
throw new Error(`server did not start: ${url}`);
|
||||
}
|
||||
|
||||
export function buildWebhookConfig(params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
port: number;
|
||||
verificationToken?: string;
|
||||
encryptKey?: string;
|
||||
}): ClawdbotConfig {
|
||||
return {
|
||||
channels: {
|
||||
feishu: {
|
||||
enabled: true,
|
||||
accounts: {
|
||||
[params.accountId]: {
|
||||
enabled: true,
|
||||
appId: "cli_test",
|
||||
appSecret: "secret_test", // pragma: allowlist secret
|
||||
connectionMode: "webhook",
|
||||
webhookHost: "127.0.0.1",
|
||||
webhookPort: params.port,
|
||||
webhookPath: params.path,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
}
|
||||
|
||||
export async function withRunningWebhookMonitor(
|
||||
params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
verificationToken: string;
|
||||
encryptKey: string;
|
||||
},
|
||||
monitor: typeof monitorFeishuProvider,
|
||||
run: (url: string) => Promise<void>,
|
||||
) {
|
||||
const port = await getFreePort();
|
||||
const cfg = buildWebhookConfig({
|
||||
accountId: params.accountId,
|
||||
path: params.path,
|
||||
port,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
});
|
||||
|
||||
const abortController = new AbortController();
|
||||
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
|
||||
const monitorPromise = monitor({
|
||||
config: cfg,
|
||||
runtime,
|
||||
abortSignal: abortController.signal,
|
||||
});
|
||||
|
||||
const url = `http://127.0.0.1:${port}${params.path}`;
|
||||
await waitUntilServerReady(url);
|
||||
|
||||
try {
|
||||
await run(url);
|
||||
} finally {
|
||||
abortController.abort();
|
||||
await monitorPromise;
|
||||
}
|
||||
}
|
||||
@ -9,6 +9,20 @@ export type FeishuReaction = {
|
||||
operatorId: string;
|
||||
};
|
||||
|
||||
function resolveConfiguredFeishuClient(params: { cfg: ClawdbotConfig; accountId?: string }) {
|
||||
const account = resolveFeishuAccount(params);
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
return createFeishuClient(account);
|
||||
}
|
||||
|
||||
function assertFeishuReactionApiSuccess(response: { code?: number; msg?: string }, action: string) {
|
||||
if (response.code !== 0) {
|
||||
throw new Error(`Feishu ${action} failed: ${response.msg || `code ${response.code}`}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a reaction (emoji) to a message.
|
||||
* @param emojiType - Feishu emoji type, e.g., "SMILE", "THUMBSUP", "HEART"
|
||||
@ -21,12 +35,7 @@ export async function addReactionFeishu(params: {
|
||||
accountId?: string;
|
||||
}): Promise<{ reactionId: string }> {
|
||||
const { cfg, messageId, emojiType, accountId } = params;
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
const client = resolveConfiguredFeishuClient({ cfg, accountId });
|
||||
|
||||
const response = (await client.im.messageReaction.create({
|
||||
path: { message_id: messageId },
|
||||
@ -41,9 +50,7 @@ export async function addReactionFeishu(params: {
|
||||
data?: { reaction_id?: string };
|
||||
};
|
||||
|
||||
if (response.code !== 0) {
|
||||
throw new Error(`Feishu add reaction failed: ${response.msg || `code ${response.code}`}`);
|
||||
}
|
||||
assertFeishuReactionApiSuccess(response, "add reaction");
|
||||
|
||||
const reactionId = response.data?.reaction_id;
|
||||
if (!reactionId) {
|
||||
@ -63,12 +70,7 @@ export async function removeReactionFeishu(params: {
|
||||
accountId?: string;
|
||||
}): Promise<void> {
|
||||
const { cfg, messageId, reactionId, accountId } = params;
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
const client = resolveConfiguredFeishuClient({ cfg, accountId });
|
||||
|
||||
const response = (await client.im.messageReaction.delete({
|
||||
path: {
|
||||
@ -77,9 +79,7 @@ export async function removeReactionFeishu(params: {
|
||||
},
|
||||
})) as { code?: number; msg?: string };
|
||||
|
||||
if (response.code !== 0) {
|
||||
throw new Error(`Feishu remove reaction failed: ${response.msg || `code ${response.code}`}`);
|
||||
}
|
||||
assertFeishuReactionApiSuccess(response, "remove reaction");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -92,12 +92,7 @@ export async function listReactionsFeishu(params: {
|
||||
accountId?: string;
|
||||
}): Promise<FeishuReaction[]> {
|
||||
const { cfg, messageId, emojiType, accountId } = params;
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
if (!account.configured) {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
const client = resolveConfiguredFeishuClient({ cfg, accountId });
|
||||
|
||||
const response = (await client.im.messageReaction.list({
|
||||
path: { message_id: messageId },
|
||||
@ -115,9 +110,7 @@ export async function listReactionsFeishu(params: {
|
||||
};
|
||||
};
|
||||
|
||||
if (response.code !== 0) {
|
||||
throw new Error(`Feishu list reactions failed: ${response.msg || `code ${response.code}`}`);
|
||||
}
|
||||
assertFeishuReactionApiSuccess(response, "list reactions");
|
||||
|
||||
const items = response.data?.items ?? [];
|
||||
return items.map((item) => ({
|
||||
|
||||
@ -43,6 +43,10 @@ function isWithdrawnReplyError(err: unknown): boolean {
|
||||
type FeishuCreateMessageClient = {
|
||||
im: {
|
||||
message: {
|
||||
reply: (opts: {
|
||||
path: { message_id: string };
|
||||
data: { content: string; msg_type: string; reply_in_thread?: true };
|
||||
}) => Promise<{ code?: number; msg?: string; data?: { message_id?: string } }>;
|
||||
create: (opts: {
|
||||
params: { receive_id_type: "chat_id" | "email" | "open_id" | "union_id" | "user_id" };
|
||||
data: { receive_id: string; content: string; msg_type: string };
|
||||
@ -74,6 +78,50 @@ async function sendFallbackDirect(
|
||||
return toFeishuSendResult(response, params.receiveId);
|
||||
}
|
||||
|
||||
async function sendReplyOrFallbackDirect(
|
||||
client: FeishuCreateMessageClient,
|
||||
params: {
|
||||
replyToMessageId?: string;
|
||||
replyInThread?: boolean;
|
||||
content: string;
|
||||
msgType: string;
|
||||
directParams: {
|
||||
receiveId: string;
|
||||
receiveIdType: "chat_id" | "email" | "open_id" | "union_id" | "user_id";
|
||||
content: string;
|
||||
msgType: string;
|
||||
};
|
||||
directErrorPrefix: string;
|
||||
replyErrorPrefix: string;
|
||||
},
|
||||
): Promise<FeishuSendResult> {
|
||||
if (!params.replyToMessageId) {
|
||||
return sendFallbackDirect(client, params.directParams, params.directErrorPrefix);
|
||||
}
|
||||
|
||||
let response: { code?: number; msg?: string; data?: { message_id?: string } };
|
||||
try {
|
||||
response = await client.im.message.reply({
|
||||
path: { message_id: params.replyToMessageId },
|
||||
data: {
|
||||
content: params.content,
|
||||
msg_type: params.msgType,
|
||||
...(params.replyInThread ? { reply_in_thread: true } : {}),
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
if (!isWithdrawnReplyError(err)) {
|
||||
throw err;
|
||||
}
|
||||
return sendFallbackDirect(client, params.directParams, params.directErrorPrefix);
|
||||
}
|
||||
if (shouldFallbackFromReplyTarget(response)) {
|
||||
return sendFallbackDirect(client, params.directParams, params.directErrorPrefix);
|
||||
}
|
||||
assertFeishuMessageApiSuccess(response, params.replyErrorPrefix);
|
||||
return toFeishuSendResult(response, params.directParams.receiveId);
|
||||
}
|
||||
|
||||
function parseInteractiveCardContent(parsed: unknown): string {
|
||||
if (!parsed || typeof parsed !== "object") {
|
||||
return "[Interactive Card]";
|
||||
@ -290,32 +338,15 @@ export async function sendMessageFeishu(
|
||||
const { content, msgType } = buildFeishuPostMessagePayload({ messageText });
|
||||
|
||||
const directParams = { receiveId, receiveIdType, content, msgType };
|
||||
|
||||
if (replyToMessageId) {
|
||||
let response: { code?: number; msg?: string; data?: { message_id?: string } };
|
||||
try {
|
||||
response = await client.im.message.reply({
|
||||
path: { message_id: replyToMessageId },
|
||||
data: {
|
||||
content,
|
||||
msg_type: msgType,
|
||||
...(replyInThread ? { reply_in_thread: true } : {}),
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
if (!isWithdrawnReplyError(err)) {
|
||||
throw err;
|
||||
}
|
||||
return sendFallbackDirect(client, directParams, "Feishu send failed");
|
||||
}
|
||||
if (shouldFallbackFromReplyTarget(response)) {
|
||||
return sendFallbackDirect(client, directParams, "Feishu send failed");
|
||||
}
|
||||
assertFeishuMessageApiSuccess(response, "Feishu reply failed");
|
||||
return toFeishuSendResult(response, receiveId);
|
||||
}
|
||||
|
||||
return sendFallbackDirect(client, directParams, "Feishu send failed");
|
||||
return sendReplyOrFallbackDirect(client, {
|
||||
replyToMessageId,
|
||||
replyInThread,
|
||||
content,
|
||||
msgType,
|
||||
directParams,
|
||||
directErrorPrefix: "Feishu send failed",
|
||||
replyErrorPrefix: "Feishu reply failed",
|
||||
});
|
||||
}
|
||||
|
||||
export type SendFeishuCardParams = {
|
||||
@ -334,32 +365,15 @@ export async function sendCardFeishu(params: SendFeishuCardParams): Promise<Feis
|
||||
const content = JSON.stringify(card);
|
||||
|
||||
const directParams = { receiveId, receiveIdType, content, msgType: "interactive" };
|
||||
|
||||
if (replyToMessageId) {
|
||||
let response: { code?: number; msg?: string; data?: { message_id?: string } };
|
||||
try {
|
||||
response = await client.im.message.reply({
|
||||
path: { message_id: replyToMessageId },
|
||||
data: {
|
||||
content,
|
||||
msg_type: "interactive",
|
||||
...(replyInThread ? { reply_in_thread: true } : {}),
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
if (!isWithdrawnReplyError(err)) {
|
||||
throw err;
|
||||
}
|
||||
return sendFallbackDirect(client, directParams, "Feishu card send failed");
|
||||
}
|
||||
if (shouldFallbackFromReplyTarget(response)) {
|
||||
return sendFallbackDirect(client, directParams, "Feishu card send failed");
|
||||
}
|
||||
assertFeishuMessageApiSuccess(response, "Feishu card reply failed");
|
||||
return toFeishuSendResult(response, receiveId);
|
||||
}
|
||||
|
||||
return sendFallbackDirect(client, directParams, "Feishu card send failed");
|
||||
return sendReplyOrFallbackDirect(client, {
|
||||
replyToMessageId,
|
||||
replyInThread,
|
||||
content,
|
||||
msgType: "interactive",
|
||||
directParams,
|
||||
directErrorPrefix: "Feishu card send failed",
|
||||
replyErrorPrefix: "Feishu card reply failed",
|
||||
});
|
||||
}
|
||||
|
||||
export async function updateCardFeishu(params: {
|
||||
|
||||
@ -7,6 +7,9 @@
|
||||
"dependencies": {
|
||||
"google-auth-library": "^10.6.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"openclaw": "workspace:*"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"openclaw": ">=2026.3.11"
|
||||
},
|
||||
|
||||
@ -27,6 +27,28 @@ function createMockFetch(response?: { status?: number; body?: unknown; contentTy
|
||||
return { mockFetch: mockFetch as unknown as typeof fetch, calls };
|
||||
}
|
||||
|
||||
function createTestClient(response?: { status?: number; body?: unknown; contentType?: string }) {
|
||||
const { mockFetch, calls } = createMockFetch(response);
|
||||
const client = createMattermostClient({
|
||||
baseUrl: "http://localhost:8065",
|
||||
botToken: "tok",
|
||||
fetchImpl: mockFetch,
|
||||
});
|
||||
return { client, calls };
|
||||
}
|
||||
|
||||
async function updatePostAndCapture(
|
||||
update: Parameters<typeof updateMattermostPost>[2],
|
||||
response?: { status?: number; body?: unknown; contentType?: string },
|
||||
) {
|
||||
const { client, calls } = createTestClient(response ?? { body: { id: "post1" } });
|
||||
await updateMattermostPost(client, "post1", update);
|
||||
return {
|
||||
calls,
|
||||
body: JSON.parse(calls[0].init?.body as string) as Record<string, unknown>,
|
||||
};
|
||||
}
|
||||
|
||||
// ── normalizeMattermostBaseUrl ────────────────────────────────────────
|
||||
|
||||
describe("normalizeMattermostBaseUrl", () => {
|
||||
@ -229,68 +251,38 @@ describe("createMattermostPost", () => {
|
||||
|
||||
describe("updateMattermostPost", () => {
|
||||
it("sends PUT to /posts/{id}", async () => {
|
||||
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
|
||||
const client = createMattermostClient({
|
||||
baseUrl: "http://localhost:8065",
|
||||
botToken: "tok",
|
||||
fetchImpl: mockFetch,
|
||||
});
|
||||
|
||||
await updateMattermostPost(client, "post1", { message: "Updated" });
|
||||
const { calls } = await updatePostAndCapture({ message: "Updated" });
|
||||
|
||||
expect(calls[0].url).toContain("/posts/post1");
|
||||
expect(calls[0].init?.method).toBe("PUT");
|
||||
});
|
||||
|
||||
it("includes post id in the body", async () => {
|
||||
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
|
||||
const client = createMattermostClient({
|
||||
baseUrl: "http://localhost:8065",
|
||||
botToken: "tok",
|
||||
fetchImpl: mockFetch,
|
||||
});
|
||||
|
||||
await updateMattermostPost(client, "post1", { message: "Updated" });
|
||||
|
||||
const body = JSON.parse(calls[0].init?.body as string);
|
||||
const { body } = await updatePostAndCapture({ message: "Updated" });
|
||||
expect(body.id).toBe("post1");
|
||||
expect(body.message).toBe("Updated");
|
||||
});
|
||||
|
||||
it("includes props for button completion updates", async () => {
|
||||
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
|
||||
const client = createMattermostClient({
|
||||
baseUrl: "http://localhost:8065",
|
||||
botToken: "tok",
|
||||
fetchImpl: mockFetch,
|
||||
});
|
||||
|
||||
await updateMattermostPost(client, "post1", {
|
||||
const { body } = await updatePostAndCapture({
|
||||
message: "Original message",
|
||||
props: {
|
||||
attachments: [{ text: "✓ **do_now** selected by @tony" }],
|
||||
},
|
||||
});
|
||||
|
||||
const body = JSON.parse(calls[0].init?.body as string);
|
||||
expect(body.message).toBe("Original message");
|
||||
expect(body.props.attachments[0].text).toContain("✓");
|
||||
expect(body.props.attachments[0].text).toContain("do_now");
|
||||
expect(body.props).toMatchObject({
|
||||
attachments: [{ text: expect.stringContaining("✓") }],
|
||||
});
|
||||
expect(body.props).toMatchObject({
|
||||
attachments: [{ text: expect.stringContaining("do_now") }],
|
||||
});
|
||||
});
|
||||
|
||||
it("omits message when not provided", async () => {
|
||||
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
|
||||
const client = createMattermostClient({
|
||||
baseUrl: "http://localhost:8065",
|
||||
botToken: "tok",
|
||||
fetchImpl: mockFetch,
|
||||
});
|
||||
|
||||
await updateMattermostPost(client, "post1", {
|
||||
const { body } = await updatePostAndCapture({
|
||||
props: { attachments: [] },
|
||||
});
|
||||
|
||||
const body = JSON.parse(calls[0].init?.body as string);
|
||||
expect(body.id).toBe("post1");
|
||||
expect(body.message).toBeUndefined();
|
||||
expect(body.props).toEqual({ attachments: [] });
|
||||
|
||||
@ -496,6 +496,104 @@ describe("createMattermostInteractionHandler", () => {
|
||||
return res as unknown as ServerResponse & { headers: Record<string, string>; body: string };
|
||||
}
|
||||
|
||||
function createActionContext(actionId = "approve", channelId = "chan-1") {
|
||||
const context = { action_id: actionId, __openclaw_channel_id: channelId };
|
||||
return { context, token: generateInteractionToken(context, "acct") };
|
||||
}
|
||||
|
||||
function createInteractionBody(params: {
|
||||
context: Record<string, unknown>;
|
||||
token: string;
|
||||
channelId?: string;
|
||||
postId?: string;
|
||||
userId?: string;
|
||||
userName?: string;
|
||||
}) {
|
||||
return {
|
||||
user_id: params.userId ?? "user-1",
|
||||
...(params.userName ? { user_name: params.userName } : {}),
|
||||
channel_id: params.channelId ?? "chan-1",
|
||||
post_id: params.postId ?? "post-1",
|
||||
context: { ...params.context, _token: params.token },
|
||||
};
|
||||
}
|
||||
|
||||
async function runHandler(
|
||||
handler: ReturnType<typeof createMattermostInteractionHandler>,
|
||||
params: {
|
||||
body: unknown;
|
||||
remoteAddress?: string;
|
||||
headers?: Record<string, string>;
|
||||
},
|
||||
) {
|
||||
const req = createReq({
|
||||
remoteAddress: params.remoteAddress,
|
||||
headers: params.headers,
|
||||
body: params.body,
|
||||
});
|
||||
const res = createRes();
|
||||
await handler(req, res);
|
||||
return res;
|
||||
}
|
||||
|
||||
function expectForbiddenResponse(
|
||||
res: ServerResponse & { body: string },
|
||||
expectedMessage: string,
|
||||
) {
|
||||
expect(res.statusCode).toBe(403);
|
||||
expect(res.body).toContain(expectedMessage);
|
||||
}
|
||||
|
||||
function expectSuccessfulApprovalUpdate(
|
||||
res: ServerResponse & { body: string },
|
||||
requestLog?: Array<{ path: string; method?: string }>,
|
||||
) {
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(res.body).toBe("{}");
|
||||
if (requestLog) {
|
||||
expect(requestLog).toEqual([
|
||||
{ path: "/posts/post-1", method: undefined },
|
||||
{ path: "/posts/post-1", method: "PUT" },
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
function createActionPost(params?: {
|
||||
actionId?: string;
|
||||
actionName?: string;
|
||||
channelId?: string;
|
||||
rootId?: string;
|
||||
}): MattermostPost {
|
||||
return {
|
||||
id: "post-1",
|
||||
channel_id: params?.channelId ?? "chan-1",
|
||||
...(params?.rootId ? { root_id: params.rootId } : {}),
|
||||
message: "Choose",
|
||||
props: {
|
||||
attachments: [
|
||||
{
|
||||
actions: [
|
||||
{
|
||||
id: params?.actionId ?? "approve",
|
||||
name: params?.actionName ?? "Approve",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createUnusedInteractionHandler() {
|
||||
return createMattermostInteractionHandler({
|
||||
client: {
|
||||
request: async () => ({ message: "unused" }),
|
||||
} as unknown as MattermostClient,
|
||||
botUserId: "bot",
|
||||
accountId: "acct",
|
||||
});
|
||||
}
|
||||
|
||||
async function runApproveInteraction(params?: {
|
||||
actionName?: string;
|
||||
allowedSourceIps?: string[];
|
||||
@ -503,8 +601,7 @@ describe("createMattermostInteractionHandler", () => {
|
||||
remoteAddress?: string;
|
||||
headers?: Record<string, string>;
|
||||
}) {
|
||||
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
|
||||
const token = generateInteractionToken(context, "acct");
|
||||
const { context, token } = createActionContext();
|
||||
const requestLog: Array<{ path: string; method?: string }> = [];
|
||||
const handler = createMattermostInteractionHandler({
|
||||
client: {
|
||||
@ -513,15 +610,7 @@ describe("createMattermostInteractionHandler", () => {
|
||||
if (init?.method === "PUT") {
|
||||
return { id: "post-1" };
|
||||
}
|
||||
return {
|
||||
channel_id: "chan-1",
|
||||
message: "Choose",
|
||||
props: {
|
||||
attachments: [
|
||||
{ actions: [{ id: "approve", name: params?.actionName ?? "Approve" }] },
|
||||
],
|
||||
},
|
||||
};
|
||||
return createActionPost({ actionName: params?.actionName });
|
||||
},
|
||||
} as unknown as MattermostClient,
|
||||
botUserId: "bot",
|
||||
@ -530,50 +619,27 @@ describe("createMattermostInteractionHandler", () => {
|
||||
trustedProxies: params?.trustedProxies,
|
||||
});
|
||||
|
||||
const req = createReq({
|
||||
const res = await runHandler(handler, {
|
||||
remoteAddress: params?.remoteAddress,
|
||||
headers: params?.headers,
|
||||
body: {
|
||||
user_id: "user-1",
|
||||
user_name: "alice",
|
||||
channel_id: "chan-1",
|
||||
post_id: "post-1",
|
||||
context: { ...context, _token: token },
|
||||
},
|
||||
body: createInteractionBody({ context, token, userName: "alice" }),
|
||||
});
|
||||
const res = createRes();
|
||||
await handler(req, res);
|
||||
return { res, requestLog };
|
||||
}
|
||||
|
||||
async function runInvalidActionRequest(actionId: string) {
|
||||
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
|
||||
const token = generateInteractionToken(context, "acct");
|
||||
const { context, token } = createActionContext();
|
||||
const handler = createMattermostInteractionHandler({
|
||||
client: {
|
||||
request: async () => ({
|
||||
channel_id: "chan-1",
|
||||
message: "Choose",
|
||||
props: {
|
||||
attachments: [{ actions: [{ id: actionId, name: actionId }] }],
|
||||
},
|
||||
}),
|
||||
request: async () => createActionPost({ actionId, actionName: actionId }),
|
||||
} as unknown as MattermostClient,
|
||||
botUserId: "bot",
|
||||
accountId: "acct",
|
||||
});
|
||||
|
||||
const req = createReq({
|
||||
body: {
|
||||
user_id: "user-1",
|
||||
channel_id: "chan-1",
|
||||
post_id: "post-1",
|
||||
context: { ...context, _token: token },
|
||||
},
|
||||
return await runHandler(handler, {
|
||||
body: createInteractionBody({ context, token }),
|
||||
});
|
||||
const res = createRes();
|
||||
await handler(req, res);
|
||||
return res;
|
||||
}
|
||||
|
||||
it("accepts callback requests from an allowlisted source IP", async () => {
|
||||
@ -582,12 +648,7 @@ describe("createMattermostInteractionHandler", () => {
|
||||
remoteAddress: "198.51.100.8",
|
||||
});
|
||||
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(res.body).toBe("{}");
|
||||
expect(requestLog).toEqual([
|
||||
{ path: "/posts/post-1", method: undefined },
|
||||
{ path: "/posts/post-1", method: "PUT" },
|
||||
]);
|
||||
expectSuccessfulApprovalUpdate(res, requestLog);
|
||||
});
|
||||
|
||||
it("accepts forwarded Mattermost source IPs from a trusted proxy", async () => {
|
||||
@ -603,8 +664,7 @@ describe("createMattermostInteractionHandler", () => {
|
||||
});
|
||||
|
||||
it("rejects callback requests from non-allowlisted source IPs", async () => {
|
||||
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
|
||||
const token = generateInteractionToken(context, "acct");
|
||||
const { context, token } = createActionContext();
|
||||
const handler = createMattermostInteractionHandler({
|
||||
client: {
|
||||
request: async () => {
|
||||
@ -616,33 +676,17 @@ describe("createMattermostInteractionHandler", () => {
|
||||
allowedSourceIps: ["127.0.0.1"],
|
||||
});
|
||||
|
||||
const req = createReq({
|
||||
const res = await runHandler(handler, {
|
||||
remoteAddress: "198.51.100.8",
|
||||
body: {
|
||||
user_id: "user-1",
|
||||
channel_id: "chan-1",
|
||||
post_id: "post-1",
|
||||
context: { ...context, _token: token },
|
||||
},
|
||||
body: createInteractionBody({ context, token }),
|
||||
});
|
||||
const res = createRes();
|
||||
|
||||
await handler(req, res);
|
||||
|
||||
expect(res.statusCode).toBe(403);
|
||||
expect(res.body).toContain("Forbidden origin");
|
||||
expectForbiddenResponse(res, "Forbidden origin");
|
||||
});
|
||||
|
||||
it("rejects requests with an invalid interaction token", async () => {
|
||||
const handler = createMattermostInteractionHandler({
|
||||
client: {
|
||||
request: async () => ({ message: "unused" }),
|
||||
} as unknown as MattermostClient,
|
||||
botUserId: "bot",
|
||||
accountId: "acct",
|
||||
});
|
||||
const handler = createUnusedInteractionHandler();
|
||||
|
||||
const req = createReq({
|
||||
const res = await runHandler(handler, {
|
||||
body: {
|
||||
user_id: "user-1",
|
||||
channel_id: "chan-1",
|
||||
@ -650,72 +694,33 @@ describe("createMattermostInteractionHandler", () => {
|
||||
context: { action_id: "approve", _token: "deadbeef" },
|
||||
},
|
||||
});
|
||||
const res = createRes();
|
||||
|
||||
await handler(req, res);
|
||||
|
||||
expect(res.statusCode).toBe(403);
|
||||
expect(res.body).toContain("Invalid token");
|
||||
expectForbiddenResponse(res, "Invalid token");
|
||||
});
|
||||
|
||||
it("rejects requests when the signed channel does not match the callback payload", async () => {
|
||||
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
|
||||
const token = generateInteractionToken(context, "acct");
|
||||
const handler = createMattermostInteractionHandler({
|
||||
client: {
|
||||
request: async () => ({ message: "unused" }),
|
||||
} as unknown as MattermostClient,
|
||||
botUserId: "bot",
|
||||
accountId: "acct",
|
||||
const { context, token } = createActionContext();
|
||||
const handler = createUnusedInteractionHandler();
|
||||
|
||||
const res = await runHandler(handler, {
|
||||
body: createInteractionBody({ context, token, channelId: "chan-2" }),
|
||||
});
|
||||
|
||||
const req = createReq({
|
||||
body: {
|
||||
user_id: "user-1",
|
||||
channel_id: "chan-2",
|
||||
post_id: "post-1",
|
||||
context: { ...context, _token: token },
|
||||
},
|
||||
});
|
||||
const res = createRes();
|
||||
|
||||
await handler(req, res);
|
||||
|
||||
expect(res.statusCode).toBe(403);
|
||||
expect(res.body).toContain("Channel mismatch");
|
||||
expectForbiddenResponse(res, "Channel mismatch");
|
||||
});
|
||||
|
||||
it("rejects requests when the fetched post does not belong to the callback channel", async () => {
|
||||
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
|
||||
const token = generateInteractionToken(context, "acct");
|
||||
const { context, token } = createActionContext();
|
||||
const handler = createMattermostInteractionHandler({
|
||||
client: {
|
||||
request: async () => ({
|
||||
channel_id: "chan-9",
|
||||
message: "Choose",
|
||||
props: {
|
||||
attachments: [{ actions: [{ id: "approve", name: "Approve" }] }],
|
||||
},
|
||||
}),
|
||||
request: async () => createActionPost({ channelId: "chan-9" }),
|
||||
} as unknown as MattermostClient,
|
||||
botUserId: "bot",
|
||||
accountId: "acct",
|
||||
});
|
||||
|
||||
const req = createReq({
|
||||
body: {
|
||||
user_id: "user-1",
|
||||
channel_id: "chan-1",
|
||||
post_id: "post-1",
|
||||
context: { ...context, _token: token },
|
||||
},
|
||||
const res = await runHandler(handler, {
|
||||
body: createInteractionBody({ context, token }),
|
||||
});
|
||||
const res = createRes();
|
||||
|
||||
await handler(req, res);
|
||||
|
||||
expect(res.statusCode).toBe(403);
|
||||
expect(res.body).toContain("Post/channel mismatch");
|
||||
expectForbiddenResponse(res, "Post/channel mismatch");
|
||||
});
|
||||
|
||||
it("rejects requests when the action is not present on the fetched post", async () => {
|
||||
@ -730,12 +735,7 @@ describe("createMattermostInteractionHandler", () => {
|
||||
actionName: "approve",
|
||||
});
|
||||
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(res.body).toBe("{}");
|
||||
expect(requestLog).toEqual([
|
||||
{ path: "/posts/post-1", method: undefined },
|
||||
{ path: "/posts/post-1", method: "PUT" },
|
||||
]);
|
||||
expectSuccessfulApprovalUpdate(res, requestLog);
|
||||
});
|
||||
|
||||
it("forwards fetched post threading metadata to session and button callbacks", async () => {
|
||||
@ -745,19 +745,10 @@ describe("createMattermostInteractionHandler", () => {
|
||||
enqueueSystemEvent,
|
||||
},
|
||||
} as unknown as Parameters<typeof setMattermostRuntime>[0]);
|
||||
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
|
||||
const token = generateInteractionToken(context, "acct");
|
||||
const { context, token } = createActionContext();
|
||||
const resolveSessionKey = vi.fn().mockResolvedValue("session:thread:root-9");
|
||||
const dispatchButtonClick = vi.fn();
|
||||
const fetchedPost: MattermostPost = {
|
||||
id: "post-1",
|
||||
channel_id: "chan-1",
|
||||
root_id: "root-9",
|
||||
message: "Choose",
|
||||
props: {
|
||||
attachments: [{ actions: [{ id: "approve", name: "Approve" }] }],
|
||||
},
|
||||
};
|
||||
const fetchedPost = createActionPost({ rootId: "root-9" });
|
||||
const handler = createMattermostInteractionHandler({
|
||||
client: {
|
||||
request: async (_path: string, init?: { method?: string }) =>
|
||||
@ -769,19 +760,9 @@ describe("createMattermostInteractionHandler", () => {
|
||||
dispatchButtonClick,
|
||||
});
|
||||
|
||||
const req = createReq({
|
||||
body: {
|
||||
user_id: "user-1",
|
||||
user_name: "alice",
|
||||
channel_id: "chan-1",
|
||||
post_id: "post-1",
|
||||
context: { ...context, _token: token },
|
||||
},
|
||||
const res = await runHandler(handler, {
|
||||
body: createInteractionBody({ context, token, userName: "alice" }),
|
||||
});
|
||||
const res = createRes();
|
||||
|
||||
await handler(req, res);
|
||||
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(resolveSessionKey).toHaveBeenCalledWith({
|
||||
channelId: "chan-1",
|
||||
@ -803,8 +784,7 @@ describe("createMattermostInteractionHandler", () => {
|
||||
});
|
||||
|
||||
it("lets a custom interaction handler short-circuit generic completion updates", async () => {
|
||||
const context = { action_id: "mdlprov", __openclaw_channel_id: "chan-1" };
|
||||
const token = generateInteractionToken(context, "acct");
|
||||
const { context, token } = createActionContext("mdlprov");
|
||||
const requestLog: Array<{ path: string; method?: string }> = [];
|
||||
const handleInteraction = vi.fn().mockResolvedValue({
|
||||
ephemeral_text: "Only the original requester can use this picker.",
|
||||
@ -814,14 +794,10 @@ describe("createMattermostInteractionHandler", () => {
|
||||
client: {
|
||||
request: async (path: string, init?: { method?: string }) => {
|
||||
requestLog.push({ path, method: init?.method });
|
||||
return {
|
||||
id: "post-1",
|
||||
channel_id: "chan-1",
|
||||
message: "Choose",
|
||||
props: {
|
||||
attachments: [{ actions: [{ id: "mdlprov", name: "Browse providers" }] }],
|
||||
},
|
||||
};
|
||||
return createActionPost({
|
||||
actionId: "mdlprov",
|
||||
actionName: "Browse providers",
|
||||
});
|
||||
},
|
||||
} as unknown as MattermostClient,
|
||||
botUserId: "bot",
|
||||
@ -830,18 +806,14 @@ describe("createMattermostInteractionHandler", () => {
|
||||
dispatchButtonClick,
|
||||
});
|
||||
|
||||
const req = createReq({
|
||||
body: {
|
||||
user_id: "user-2",
|
||||
user_name: "alice",
|
||||
channel_id: "chan-1",
|
||||
post_id: "post-1",
|
||||
context: { ...context, _token: token },
|
||||
},
|
||||
const res = await runHandler(handler, {
|
||||
body: createInteractionBody({
|
||||
context,
|
||||
token,
|
||||
userId: "user-2",
|
||||
userName: "alice",
|
||||
}),
|
||||
});
|
||||
const res = createRes();
|
||||
|
||||
await handler(req, res);
|
||||
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(res.body).toBe(
|
||||
|
||||
@ -16,6 +16,35 @@ const accountFixture: ResolvedMattermostAccount = {
|
||||
config: {},
|
||||
};
|
||||
|
||||
function authorizeGroupCommand(senderId: string) {
|
||||
return authorizeMattermostCommandInvocation({
|
||||
account: {
|
||||
...accountFixture,
|
||||
config: {
|
||||
groupPolicy: "allowlist",
|
||||
allowFrom: ["trusted-user"],
|
||||
},
|
||||
},
|
||||
cfg: {
|
||||
commands: {
|
||||
useAccessGroups: true,
|
||||
},
|
||||
},
|
||||
senderId,
|
||||
senderName: senderId,
|
||||
channelId: "chan-1",
|
||||
channelInfo: {
|
||||
id: "chan-1",
|
||||
type: "O",
|
||||
name: "general",
|
||||
display_name: "General",
|
||||
},
|
||||
storeAllowFrom: [],
|
||||
allowTextCommands: true,
|
||||
hasControlCommand: true,
|
||||
});
|
||||
}
|
||||
|
||||
describe("mattermost monitor authz", () => {
|
||||
it("keeps DM allowlist merged with pairing-store entries", () => {
|
||||
const resolved = resolveMattermostEffectiveAllowFromLists({
|
||||
@ -72,32 +101,7 @@ describe("mattermost monitor authz", () => {
|
||||
});
|
||||
|
||||
it("denies group control commands when the sender is outside the allowlist", () => {
|
||||
const decision = authorizeMattermostCommandInvocation({
|
||||
account: {
|
||||
...accountFixture,
|
||||
config: {
|
||||
groupPolicy: "allowlist",
|
||||
allowFrom: ["trusted-user"],
|
||||
},
|
||||
},
|
||||
cfg: {
|
||||
commands: {
|
||||
useAccessGroups: true,
|
||||
},
|
||||
},
|
||||
senderId: "attacker",
|
||||
senderName: "attacker",
|
||||
channelId: "chan-1",
|
||||
channelInfo: {
|
||||
id: "chan-1",
|
||||
type: "O",
|
||||
name: "general",
|
||||
display_name: "General",
|
||||
},
|
||||
storeAllowFrom: [],
|
||||
allowTextCommands: true,
|
||||
hasControlCommand: true,
|
||||
});
|
||||
const decision = authorizeGroupCommand("attacker");
|
||||
|
||||
expect(decision).toMatchObject({
|
||||
ok: false,
|
||||
@ -107,32 +111,7 @@ describe("mattermost monitor authz", () => {
|
||||
});
|
||||
|
||||
it("authorizes group control commands for allowlisted senders", () => {
|
||||
const decision = authorizeMattermostCommandInvocation({
|
||||
account: {
|
||||
...accountFixture,
|
||||
config: {
|
||||
groupPolicy: "allowlist",
|
||||
allowFrom: ["trusted-user"],
|
||||
},
|
||||
},
|
||||
cfg: {
|
||||
commands: {
|
||||
useAccessGroups: true,
|
||||
},
|
||||
},
|
||||
senderId: "trusted-user",
|
||||
senderName: "trusted-user",
|
||||
channelId: "chan-1",
|
||||
channelInfo: {
|
||||
id: "chan-1",
|
||||
type: "O",
|
||||
name: "general",
|
||||
display_name: "General",
|
||||
},
|
||||
storeAllowFrom: [],
|
||||
allowTextCommands: true,
|
||||
hasControlCommand: true,
|
||||
});
|
||||
const decision = authorizeGroupCommand("trusted-user");
|
||||
|
||||
expect(decision).toMatchObject({
|
||||
ok: true,
|
||||
|
||||
@ -14,6 +14,28 @@ describe("mattermost reactions", () => {
|
||||
resetMattermostReactionBotUserCacheForTests();
|
||||
});
|
||||
|
||||
async function addReactionWithFetch(
|
||||
fetchMock: ReturnType<typeof createMattermostReactionFetchMock>,
|
||||
) {
|
||||
return addMattermostReaction({
|
||||
cfg: createMattermostTestConfig(),
|
||||
postId: "POST1",
|
||||
emojiName: "thumbsup",
|
||||
fetchImpl: fetchMock as unknown as typeof fetch,
|
||||
});
|
||||
}
|
||||
|
||||
async function removeReactionWithFetch(
|
||||
fetchMock: ReturnType<typeof createMattermostReactionFetchMock>,
|
||||
) {
|
||||
return removeMattermostReaction({
|
||||
cfg: createMattermostTestConfig(),
|
||||
postId: "POST1",
|
||||
emojiName: "thumbsup",
|
||||
fetchImpl: fetchMock as unknown as typeof fetch,
|
||||
});
|
||||
}
|
||||
|
||||
it("adds reactions by calling /users/me then POST /reactions", async () => {
|
||||
const fetchMock = createMattermostReactionFetchMock({
|
||||
mode: "add",
|
||||
@ -21,12 +43,7 @@ describe("mattermost reactions", () => {
|
||||
emojiName: "thumbsup",
|
||||
});
|
||||
|
||||
const result = await addMattermostReaction({
|
||||
cfg: createMattermostTestConfig(),
|
||||
postId: "POST1",
|
||||
emojiName: "thumbsup",
|
||||
fetchImpl: fetchMock as unknown as typeof fetch,
|
||||
});
|
||||
const result = await addReactionWithFetch(fetchMock);
|
||||
|
||||
expect(result).toEqual({ ok: true });
|
||||
expect(fetchMock).toHaveBeenCalled();
|
||||
@ -41,12 +58,7 @@ describe("mattermost reactions", () => {
|
||||
body: { id: "err", message: "boom" },
|
||||
});
|
||||
|
||||
const result = await addMattermostReaction({
|
||||
cfg: createMattermostTestConfig(),
|
||||
postId: "POST1",
|
||||
emojiName: "thumbsup",
|
||||
fetchImpl: fetchMock as unknown as typeof fetch,
|
||||
});
|
||||
const result = await addReactionWithFetch(fetchMock);
|
||||
|
||||
expect(result.ok).toBe(false);
|
||||
if (!result.ok) {
|
||||
@ -61,12 +73,7 @@ describe("mattermost reactions", () => {
|
||||
emojiName: "thumbsup",
|
||||
});
|
||||
|
||||
const result = await removeMattermostReaction({
|
||||
cfg: createMattermostTestConfig(),
|
||||
postId: "POST1",
|
||||
emojiName: "thumbsup",
|
||||
fetchImpl: fetchMock as unknown as typeof fetch,
|
||||
});
|
||||
const result = await removeReactionWithFetch(fetchMock);
|
||||
|
||||
expect(result).toEqual({ ok: true });
|
||||
expect(fetchMock).toHaveBeenCalled();
|
||||
|
||||
@ -10,6 +10,25 @@ import {
|
||||
} from "./slash-commands.js";
|
||||
|
||||
describe("slash-commands", () => {
|
||||
async function registerSingleStatusCommand(
|
||||
request: (path: string, init?: { method?: string }) => Promise<unknown>,
|
||||
) {
|
||||
const client = { request } as unknown as MattermostClient;
|
||||
return registerSlashCommands({
|
||||
client,
|
||||
teamId: "team-1",
|
||||
creatorUserId: "bot-user",
|
||||
callbackUrl: "http://gateway/callback",
|
||||
commands: [
|
||||
{
|
||||
trigger: "oc_status",
|
||||
description: "status",
|
||||
autoComplete: true,
|
||||
},
|
||||
],
|
||||
});
|
||||
}
|
||||
|
||||
it("parses application/x-www-form-urlencoded payloads", () => {
|
||||
const payload = parseSlashCommandPayload(
|
||||
"token=t1&team_id=team&channel_id=ch1&user_id=u1&command=%2Foc_status&text=now",
|
||||
@ -101,21 +120,7 @@ describe("slash-commands", () => {
|
||||
}
|
||||
throw new Error(`unexpected request path: ${path}`);
|
||||
});
|
||||
const client = { request } as unknown as MattermostClient;
|
||||
|
||||
const result = await registerSlashCommands({
|
||||
client,
|
||||
teamId: "team-1",
|
||||
creatorUserId: "bot-user",
|
||||
callbackUrl: "http://gateway/callback",
|
||||
commands: [
|
||||
{
|
||||
trigger: "oc_status",
|
||||
description: "status",
|
||||
autoComplete: true,
|
||||
},
|
||||
],
|
||||
});
|
||||
const result = await registerSingleStatusCommand(request);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]?.managed).toBe(false);
|
||||
@ -144,21 +149,7 @@ describe("slash-commands", () => {
|
||||
}
|
||||
throw new Error(`unexpected request path: ${path}`);
|
||||
});
|
||||
const client = { request } as unknown as MattermostClient;
|
||||
|
||||
const result = await registerSlashCommands({
|
||||
client,
|
||||
teamId: "team-1",
|
||||
creatorUserId: "bot-user",
|
||||
callbackUrl: "http://gateway/callback",
|
||||
commands: [
|
||||
{
|
||||
trigger: "oc_status",
|
||||
description: "status",
|
||||
autoComplete: true,
|
||||
},
|
||||
],
|
||||
});
|
||||
const result = await registerSingleStatusCommand(request);
|
||||
|
||||
expect(result).toHaveLength(0);
|
||||
expect(request).toHaveBeenCalledTimes(1);
|
||||
|
||||
@ -58,6 +58,23 @@ const accountFixture: ResolvedMattermostAccount = {
|
||||
config: {},
|
||||
};
|
||||
|
||||
async function runSlashRequest(params: {
|
||||
commandTokens: Set<string>;
|
||||
body: string;
|
||||
method?: string;
|
||||
}) {
|
||||
const handler = createSlashCommandHttpHandler({
|
||||
account: accountFixture,
|
||||
cfg: {} as OpenClawConfig,
|
||||
runtime: {} as RuntimeEnv,
|
||||
commandTokens: params.commandTokens,
|
||||
});
|
||||
const req = createRequest({ method: params.method, body: params.body });
|
||||
const response = createResponse();
|
||||
await handler(req, response.res);
|
||||
return response;
|
||||
}
|
||||
|
||||
describe("slash-http", () => {
|
||||
it("rejects non-POST methods", async () => {
|
||||
const handler = createSlashCommandHttpHandler({
|
||||
@ -93,36 +110,20 @@ describe("slash-http", () => {
|
||||
});
|
||||
|
||||
it("fails closed when no command tokens are registered", async () => {
|
||||
const handler = createSlashCommandHttpHandler({
|
||||
account: accountFixture,
|
||||
cfg: {} as OpenClawConfig,
|
||||
runtime: {} as RuntimeEnv,
|
||||
const response = await runSlashRequest({
|
||||
commandTokens: new Set<string>(),
|
||||
});
|
||||
const req = createRequest({
|
||||
body: "token=tok1&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=",
|
||||
});
|
||||
const response = createResponse();
|
||||
|
||||
await handler(req, response.res);
|
||||
|
||||
expect(response.res.statusCode).toBe(401);
|
||||
expect(response.getBody()).toContain("Unauthorized: invalid command token.");
|
||||
});
|
||||
|
||||
it("rejects unknown command tokens", async () => {
|
||||
const handler = createSlashCommandHttpHandler({
|
||||
account: accountFixture,
|
||||
cfg: {} as OpenClawConfig,
|
||||
runtime: {} as RuntimeEnv,
|
||||
const response = await runSlashRequest({
|
||||
commandTokens: new Set(["known-token"]),
|
||||
});
|
||||
const req = createRequest({
|
||||
body: "token=unknown&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=",
|
||||
});
|
||||
const response = createResponse();
|
||||
|
||||
await handler(req, response.res);
|
||||
|
||||
expect(response.res.statusCode).toBe(401);
|
||||
expect(response.getBody()).toContain("Unauthorized: invalid command token.");
|
||||
|
||||
@ -4,6 +4,9 @@
|
||||
"private": true,
|
||||
"description": "OpenClaw core memory search plugin",
|
||||
"type": "module",
|
||||
"devDependencies": {
|
||||
"openclaw": "workspace:*"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"openclaw": ">=2026.3.11"
|
||||
},
|
||||
|
||||
101
extensions/msteams/src/graph-upload.test.ts
Normal file
101
extensions/msteams/src/graph-upload.test.ts
Normal file
@ -0,0 +1,101 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { uploadToOneDrive, uploadToSharePoint } from "./graph-upload.js";
|
||||
|
||||
describe("graph upload helpers", () => {
|
||||
const tokenProvider = {
|
||||
getAccessToken: vi.fn(async () => "graph-token"),
|
||||
};
|
||||
|
||||
it("uploads to OneDrive with the personal drive path", async () => {
|
||||
const fetchFn = vi.fn(
|
||||
async () =>
|
||||
new Response(
|
||||
JSON.stringify({ id: "item-1", webUrl: "https://example.com/1", name: "a.txt" }),
|
||||
{
|
||||
status: 200,
|
||||
headers: { "content-type": "application/json" },
|
||||
},
|
||||
),
|
||||
);
|
||||
|
||||
const result = await uploadToOneDrive({
|
||||
buffer: Buffer.from("hello"),
|
||||
filename: "a.txt",
|
||||
tokenProvider,
|
||||
fetchFn: fetchFn as typeof fetch,
|
||||
});
|
||||
|
||||
expect(fetchFn).toHaveBeenCalledWith(
|
||||
"https://graph.microsoft.com/v1.0/me/drive/root:/OpenClawShared/a.txt:/content",
|
||||
expect.objectContaining({
|
||||
method: "PUT",
|
||||
headers: expect.objectContaining({
|
||||
Authorization: "Bearer graph-token",
|
||||
"Content-Type": "application/octet-stream",
|
||||
}),
|
||||
}),
|
||||
);
|
||||
expect(result).toEqual({
|
||||
id: "item-1",
|
||||
webUrl: "https://example.com/1",
|
||||
name: "a.txt",
|
||||
});
|
||||
});
|
||||
|
||||
it("uploads to SharePoint with the site drive path", async () => {
|
||||
const fetchFn = vi.fn(
|
||||
async () =>
|
||||
new Response(
|
||||
JSON.stringify({ id: "item-2", webUrl: "https://example.com/2", name: "b.txt" }),
|
||||
{
|
||||
status: 200,
|
||||
headers: { "content-type": "application/json" },
|
||||
},
|
||||
),
|
||||
);
|
||||
|
||||
const result = await uploadToSharePoint({
|
||||
buffer: Buffer.from("world"),
|
||||
filename: "b.txt",
|
||||
siteId: "site-123",
|
||||
tokenProvider,
|
||||
fetchFn: fetchFn as typeof fetch,
|
||||
});
|
||||
|
||||
expect(fetchFn).toHaveBeenCalledWith(
|
||||
"https://graph.microsoft.com/v1.0/sites/site-123/drive/root:/OpenClawShared/b.txt:/content",
|
||||
expect.objectContaining({
|
||||
method: "PUT",
|
||||
headers: expect.objectContaining({
|
||||
Authorization: "Bearer graph-token",
|
||||
"Content-Type": "application/octet-stream",
|
||||
}),
|
||||
}),
|
||||
);
|
||||
expect(result).toEqual({
|
||||
id: "item-2",
|
||||
webUrl: "https://example.com/2",
|
||||
name: "b.txt",
|
||||
});
|
||||
});
|
||||
|
||||
it("rejects upload responses missing required fields", async () => {
|
||||
const fetchFn = vi.fn(
|
||||
async () =>
|
||||
new Response(JSON.stringify({ id: "item-3" }), {
|
||||
status: 200,
|
||||
headers: { "content-type": "application/json" },
|
||||
}),
|
||||
);
|
||||
|
||||
await expect(
|
||||
uploadToSharePoint({
|
||||
buffer: Buffer.from("world"),
|
||||
filename: "bad.txt",
|
||||
siteId: "site-123",
|
||||
tokenProvider,
|
||||
fetchFn: fetchFn as typeof fetch,
|
||||
}),
|
||||
).rejects.toThrow("SharePoint upload response missing required fields");
|
||||
});
|
||||
});
|
||||
@ -21,6 +21,53 @@ export interface OneDriveUploadResult {
|
||||
name: string;
|
||||
}
|
||||
|
||||
function parseUploadedDriveItem(
|
||||
data: { id?: string; webUrl?: string; name?: string },
|
||||
label: "OneDrive" | "SharePoint",
|
||||
): OneDriveUploadResult {
|
||||
if (!data.id || !data.webUrl || !data.name) {
|
||||
throw new Error(`${label} upload response missing required fields`);
|
||||
}
|
||||
|
||||
return {
|
||||
id: data.id,
|
||||
webUrl: data.webUrl,
|
||||
name: data.name,
|
||||
};
|
||||
}
|
||||
|
||||
async function uploadDriveItem(params: {
|
||||
buffer: Buffer;
|
||||
filename: string;
|
||||
contentType?: string;
|
||||
tokenProvider: MSTeamsAccessTokenProvider;
|
||||
fetchFn?: typeof fetch;
|
||||
url: string;
|
||||
label: "OneDrive" | "SharePoint";
|
||||
}): Promise<OneDriveUploadResult> {
|
||||
const fetchFn = params.fetchFn ?? fetch;
|
||||
const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE);
|
||||
|
||||
const res = await fetchFn(params.url, {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
"Content-Type": params.contentType ?? "application/octet-stream",
|
||||
},
|
||||
body: new Uint8Array(params.buffer),
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const body = await res.text().catch(() => "");
|
||||
throw new Error(`${params.label} upload failed: ${res.status} ${res.statusText} - ${body}`);
|
||||
}
|
||||
|
||||
return parseUploadedDriveItem(
|
||||
(await res.json()) as { id?: string; webUrl?: string; name?: string },
|
||||
params.label,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Upload a file to the user's OneDrive root folder.
|
||||
* For larger files, this uses the simple upload endpoint (up to 4MB).
|
||||
@ -32,41 +79,13 @@ export async function uploadToOneDrive(params: {
|
||||
tokenProvider: MSTeamsAccessTokenProvider;
|
||||
fetchFn?: typeof fetch;
|
||||
}): Promise<OneDriveUploadResult> {
|
||||
const fetchFn = params.fetchFn ?? fetch;
|
||||
const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE);
|
||||
|
||||
// Use "OpenClawShared" folder to organize bot-uploaded files
|
||||
const uploadPath = `/OpenClawShared/${encodeURIComponent(params.filename)}`;
|
||||
|
||||
const res = await fetchFn(`${GRAPH_ROOT}/me/drive/root:${uploadPath}:/content`, {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
"Content-Type": params.contentType ?? "application/octet-stream",
|
||||
},
|
||||
body: new Uint8Array(params.buffer),
|
||||
return await uploadDriveItem({
|
||||
...params,
|
||||
url: `${GRAPH_ROOT}/me/drive/root:${uploadPath}:/content`,
|
||||
label: "OneDrive",
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const body = await res.text().catch(() => "");
|
||||
throw new Error(`OneDrive upload failed: ${res.status} ${res.statusText} - ${body}`);
|
||||
}
|
||||
|
||||
const data = (await res.json()) as {
|
||||
id?: string;
|
||||
webUrl?: string;
|
||||
name?: string;
|
||||
};
|
||||
|
||||
if (!data.id || !data.webUrl || !data.name) {
|
||||
throw new Error("OneDrive upload response missing required fields");
|
||||
}
|
||||
|
||||
return {
|
||||
id: data.id,
|
||||
webUrl: data.webUrl,
|
||||
name: data.name,
|
||||
};
|
||||
}
|
||||
|
||||
export interface OneDriveSharingLink {
|
||||
@ -175,44 +194,13 @@ export async function uploadToSharePoint(params: {
|
||||
siteId: string;
|
||||
fetchFn?: typeof fetch;
|
||||
}): Promise<OneDriveUploadResult> {
|
||||
const fetchFn = params.fetchFn ?? fetch;
|
||||
const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE);
|
||||
|
||||
// Use "OpenClawShared" folder to organize bot-uploaded files
|
||||
const uploadPath = `/OpenClawShared/${encodeURIComponent(params.filename)}`;
|
||||
|
||||
const res = await fetchFn(
|
||||
`${GRAPH_ROOT}/sites/${params.siteId}/drive/root:${uploadPath}:/content`,
|
||||
{
|
||||
method: "PUT",
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
"Content-Type": params.contentType ?? "application/octet-stream",
|
||||
},
|
||||
body: new Uint8Array(params.buffer),
|
||||
},
|
||||
);
|
||||
|
||||
if (!res.ok) {
|
||||
const body = await res.text().catch(() => "");
|
||||
throw new Error(`SharePoint upload failed: ${res.status} ${res.statusText} - ${body}`);
|
||||
}
|
||||
|
||||
const data = (await res.json()) as {
|
||||
id?: string;
|
||||
webUrl?: string;
|
||||
name?: string;
|
||||
};
|
||||
|
||||
if (!data.id || !data.webUrl || !data.name) {
|
||||
throw new Error("SharePoint upload response missing required fields");
|
||||
}
|
||||
|
||||
return {
|
||||
id: data.id,
|
||||
webUrl: data.webUrl,
|
||||
name: data.name,
|
||||
};
|
||||
return await uploadDriveItem({
|
||||
...params,
|
||||
url: `${GRAPH_ROOT}/sites/${params.siteId}/drive/root:${uploadPath}:/content`,
|
||||
label: "SharePoint",
|
||||
});
|
||||
}
|
||||
|
||||
export interface ChatMember {
|
||||
|
||||
28
extensions/nextcloud-talk/src/normalize.test.ts
Normal file
28
extensions/nextcloud-talk/src/normalize.test.ts
Normal file
@ -0,0 +1,28 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
looksLikeNextcloudTalkTargetId,
|
||||
normalizeNextcloudTalkMessagingTarget,
|
||||
stripNextcloudTalkTargetPrefix,
|
||||
} from "./normalize.js";
|
||||
|
||||
describe("nextcloud-talk target normalization", () => {
|
||||
it("strips supported prefixes to a room token", () => {
|
||||
expect(stripNextcloudTalkTargetPrefix(" room:abc123 ")).toBe("abc123");
|
||||
expect(stripNextcloudTalkTargetPrefix("nextcloud-talk:room:AbC123")).toBe("AbC123");
|
||||
expect(stripNextcloudTalkTargetPrefix("nc-talk:room:ops")).toBe("ops");
|
||||
expect(stripNextcloudTalkTargetPrefix("nc:room:ops")).toBe("ops");
|
||||
expect(stripNextcloudTalkTargetPrefix("room: ")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("normalizes messaging targets to lowercase channel ids", () => {
|
||||
expect(normalizeNextcloudTalkMessagingTarget("room:AbC123")).toBe("nextcloud-talk:abc123");
|
||||
expect(normalizeNextcloudTalkMessagingTarget("nc-talk:room:Ops")).toBe("nextcloud-talk:ops");
|
||||
});
|
||||
|
||||
it("detects prefixed and bare room ids", () => {
|
||||
expect(looksLikeNextcloudTalkTargetId("nextcloud-talk:room:abc12345")).toBe(true);
|
||||
expect(looksLikeNextcloudTalkTargetId("nc:opsroom1")).toBe(true);
|
||||
expect(looksLikeNextcloudTalkTargetId("abc12345")).toBe(true);
|
||||
expect(looksLikeNextcloudTalkTargetId("")).toBe(false);
|
||||
});
|
||||
});
|
||||
@ -1,4 +1,4 @@
|
||||
export function normalizeNextcloudTalkMessagingTarget(raw: string): string | undefined {
|
||||
export function stripNextcloudTalkTargetPrefix(raw: string): string | undefined {
|
||||
const trimmed = raw.trim();
|
||||
if (!trimmed) {
|
||||
return undefined;
|
||||
@ -22,7 +22,12 @@ export function normalizeNextcloudTalkMessagingTarget(raw: string): string | und
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return `nextcloud-talk:${normalized}`.toLowerCase();
|
||||
return normalized;
|
||||
}
|
||||
|
||||
export function normalizeNextcloudTalkMessagingTarget(raw: string): string | undefined {
|
||||
const normalized = stripNextcloudTalkTargetPrefix(raw);
|
||||
return normalized ? `nextcloud-talk:${normalized}`.toLowerCase() : undefined;
|
||||
}
|
||||
|
||||
export function looksLikeNextcloudTalkTargetId(raw: string): boolean {
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import { resolveNextcloudTalkAccount } from "./accounts.js";
|
||||
import { stripNextcloudTalkTargetPrefix } from "./normalize.js";
|
||||
import { getNextcloudTalkRuntime } from "./runtime.js";
|
||||
import { generateNextcloudTalkSignature } from "./signature.js";
|
||||
import type { CoreConfig, NextcloudTalkSendResult } from "./types.js";
|
||||
@ -34,33 +35,19 @@ function resolveCredentials(
|
||||
}
|
||||
|
||||
function normalizeRoomToken(to: string): string {
|
||||
const trimmed = to.trim();
|
||||
if (!trimmed) {
|
||||
throw new Error("Room token is required for Nextcloud Talk sends");
|
||||
}
|
||||
|
||||
let normalized = trimmed;
|
||||
if (normalized.startsWith("nextcloud-talk:")) {
|
||||
normalized = normalized.slice("nextcloud-talk:".length).trim();
|
||||
} else if (normalized.startsWith("nc:")) {
|
||||
normalized = normalized.slice("nc:".length).trim();
|
||||
}
|
||||
|
||||
if (normalized.startsWith("room:")) {
|
||||
normalized = normalized.slice("room:".length).trim();
|
||||
}
|
||||
|
||||
const normalized = stripNextcloudTalkTargetPrefix(to);
|
||||
if (!normalized) {
|
||||
throw new Error("Room token is required for Nextcloud Talk sends");
|
||||
}
|
||||
return normalized;
|
||||
}
|
||||
|
||||
export async function sendMessageNextcloudTalk(
|
||||
to: string,
|
||||
text: string,
|
||||
opts: NextcloudTalkSendOpts = {},
|
||||
): Promise<NextcloudTalkSendResult> {
|
||||
function resolveNextcloudTalkSendContext(opts: NextcloudTalkSendOpts): {
|
||||
cfg: CoreConfig;
|
||||
account: ReturnType<typeof resolveNextcloudTalkAccount>;
|
||||
baseUrl: string;
|
||||
secret: string;
|
||||
} {
|
||||
const cfg = (opts.cfg ?? getNextcloudTalkRuntime().config.loadConfig()) as CoreConfig;
|
||||
const account = resolveNextcloudTalkAccount({
|
||||
cfg,
|
||||
@ -70,6 +57,15 @@ export async function sendMessageNextcloudTalk(
|
||||
{ baseUrl: opts.baseUrl, secret: opts.secret },
|
||||
account,
|
||||
);
|
||||
return { cfg, account, baseUrl, secret };
|
||||
}
|
||||
|
||||
export async function sendMessageNextcloudTalk(
|
||||
to: string,
|
||||
text: string,
|
||||
opts: NextcloudTalkSendOpts = {},
|
||||
): Promise<NextcloudTalkSendResult> {
|
||||
const { cfg, account, baseUrl, secret } = resolveNextcloudTalkSendContext(opts);
|
||||
const roomToken = normalizeRoomToken(to);
|
||||
|
||||
if (!text?.trim()) {
|
||||
@ -176,15 +172,7 @@ export async function sendReactionNextcloudTalk(
|
||||
reaction: string,
|
||||
opts: Omit<NextcloudTalkSendOpts, "replyTo"> = {},
|
||||
): Promise<{ ok: true }> {
|
||||
const cfg = (opts.cfg ?? getNextcloudTalkRuntime().config.loadConfig()) as CoreConfig;
|
||||
const account = resolveNextcloudTalkAccount({
|
||||
cfg,
|
||||
accountId: opts.accountId,
|
||||
});
|
||||
const { baseUrl, secret } = resolveCredentials(
|
||||
{ baseUrl: opts.baseUrl, secret: opts.secret },
|
||||
account,
|
||||
);
|
||||
const { account, baseUrl, secret } = resolveNextcloudTalkSendContext(opts);
|
||||
const normalizedToken = normalizeRoomToken(roomToken);
|
||||
|
||||
const body = JSON.stringify({ reaction });
|
||||
|
||||
@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from One Thousand and One Night
|
||||
| `prompt` | `command` | What is commanded of the djinn |
|
||||
| `model` | `spirit` | Which spirit answers |
|
||||
|
||||
### Unchanged
|
||||
### Shared appendix
|
||||
|
||||
These keywords already work or are too functional to replace sensibly:
|
||||
Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern.
|
||||
|
||||
- `**...**` discretion markers — already work
|
||||
- `until`, `while` — already work
|
||||
- `map`, `filter`, `reduce`, `pmap` — pipeline operators
|
||||
- `max` — constraint modifier
|
||||
- `as` — aliasing
|
||||
- Model names: `sonnet`, `opus`, `haiku` — already poetic
|
||||
Recommended Arabian Nights rewrite targets:
|
||||
|
||||
---
|
||||
|
||||
## Side-by-Side Comparison
|
||||
|
||||
### Simple Program
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
use "@alice/research" as research
|
||||
input topic: "What to investigate"
|
||||
|
||||
agent helper:
|
||||
model: sonnet
|
||||
|
||||
let findings = session: helper
|
||||
prompt: "Research {topic}"
|
||||
|
||||
output summary = session "Summarize"
|
||||
context: findings
|
||||
```
|
||||
|
||||
```prose
|
||||
# Nights
|
||||
conjure "@alice/research" as research
|
||||
wish topic: "What to investigate"
|
||||
|
||||
djinn helper:
|
||||
spirit: sonnet
|
||||
|
||||
name findings = tale: helper
|
||||
command: "Research {topic}"
|
||||
|
||||
gift summary = tale "Summarize"
|
||||
scroll: findings
|
||||
```
|
||||
|
||||
### Parallel Execution
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
parallel:
|
||||
security = session "Check security"
|
||||
perf = session "Check performance"
|
||||
style = session "Check style"
|
||||
|
||||
session "Synthesize review"
|
||||
context: { security, perf, style }
|
||||
```
|
||||
|
||||
```prose
|
||||
# Nights
|
||||
bazaar:
|
||||
security = tale "Check security"
|
||||
perf = tale "Check performance"
|
||||
style = tale "Check style"
|
||||
|
||||
tale "Synthesize review"
|
||||
scroll: { security, perf, style }
|
||||
```
|
||||
|
||||
### Loop with Condition
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
loop until **the code is bug-free** (max: 5):
|
||||
session "Find and fix bugs"
|
||||
```
|
||||
|
||||
```prose
|
||||
# Nights
|
||||
telling until **the code is bug-free** (max: 5):
|
||||
tale "Find and fix bugs"
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
try:
|
||||
session "Risky operation"
|
||||
catch as err:
|
||||
session "Handle error"
|
||||
context: err
|
||||
finally:
|
||||
session "Cleanup"
|
||||
```
|
||||
|
||||
```prose
|
||||
# Nights
|
||||
venture:
|
||||
tale "Risky operation"
|
||||
should misfortune strike as err:
|
||||
tale "Handle error"
|
||||
scroll: err
|
||||
and so it was:
|
||||
tale "Cleanup"
|
||||
```
|
||||
|
||||
### Choice Block
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
choice **the severity level**:
|
||||
option "Critical":
|
||||
session "Escalate immediately"
|
||||
option "Minor":
|
||||
session "Log for later"
|
||||
```
|
||||
|
||||
```prose
|
||||
# Nights
|
||||
crossroads **the severity level**:
|
||||
path "Critical":
|
||||
tale "Escalate immediately"
|
||||
path "Minor":
|
||||
tale "Log for later"
|
||||
```
|
||||
|
||||
### Conditionals
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
if **has security issues**:
|
||||
session "Fix security"
|
||||
elif **has performance issues**:
|
||||
session "Optimize"
|
||||
else:
|
||||
session "Approve"
|
||||
```
|
||||
- `session` sample -> `tale`
|
||||
- `parallel` sample -> `bazaar`
|
||||
- `loop` sample -> `telling`
|
||||
- `try/catch/finally` sample -> `venture` / `should misfortune strike` / `and so it was`
|
||||
- `choice` sample -> `crossroads` / `path`
|
||||
|
||||
```prose
|
||||
# Nights
|
||||
|
||||
@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from Greek epic poetry—the Il
|
||||
| `prompt` | `charge` | The quest given |
|
||||
| `model` | `muse` | Which muse inspires |
|
||||
|
||||
### Unchanged
|
||||
### Shared appendix
|
||||
|
||||
These keywords already work or are too functional to replace sensibly:
|
||||
Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern.
|
||||
|
||||
- `**...**` discretion markers — already work
|
||||
- `until`, `while` — already work
|
||||
- `map`, `filter`, `reduce`, `pmap` — pipeline operators
|
||||
- `max` — constraint modifier
|
||||
- `as` — aliasing
|
||||
- Model names: `sonnet`, `opus`, `haiku` — already poetic
|
||||
Recommended Homeric rewrite targets:
|
||||
|
||||
---
|
||||
|
||||
## Side-by-Side Comparison
|
||||
|
||||
### Simple Program
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
use "@alice/research" as research
|
||||
input topic: "What to investigate"
|
||||
|
||||
agent helper:
|
||||
model: sonnet
|
||||
|
||||
let findings = session: helper
|
||||
prompt: "Research {topic}"
|
||||
|
||||
output summary = session "Summarize"
|
||||
context: findings
|
||||
```
|
||||
|
||||
```prose
|
||||
# Homeric
|
||||
invoke "@alice/research" as research
|
||||
omen topic: "What to investigate"
|
||||
|
||||
hero helper:
|
||||
muse: sonnet
|
||||
|
||||
decree findings = trial: helper
|
||||
charge: "Research {topic}"
|
||||
|
||||
glory summary = trial "Summarize"
|
||||
tidings: findings
|
||||
```
|
||||
|
||||
### Parallel Execution
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
parallel:
|
||||
security = session "Check security"
|
||||
perf = session "Check performance"
|
||||
style = session "Check style"
|
||||
|
||||
session "Synthesize review"
|
||||
context: { security, perf, style }
|
||||
```
|
||||
|
||||
```prose
|
||||
# Homeric
|
||||
host:
|
||||
security = trial "Check security"
|
||||
perf = trial "Check performance"
|
||||
style = trial "Check style"
|
||||
|
||||
trial "Synthesize review"
|
||||
tidings: { security, perf, style }
|
||||
```
|
||||
|
||||
### Loop with Condition
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
loop until **the code is bug-free** (max: 5):
|
||||
session "Find and fix bugs"
|
||||
```
|
||||
|
||||
```prose
|
||||
# Homeric
|
||||
ordeal until **the code is bug-free** (max: 5):
|
||||
trial "Find and fix bugs"
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
try:
|
||||
session "Risky operation"
|
||||
catch as err:
|
||||
session "Handle error"
|
||||
context: err
|
||||
finally:
|
||||
session "Cleanup"
|
||||
```
|
||||
|
||||
```prose
|
||||
# Homeric
|
||||
venture:
|
||||
trial "Risky operation"
|
||||
should ruin come as err:
|
||||
trial "Handle error"
|
||||
tidings: err
|
||||
in the end:
|
||||
trial "Cleanup"
|
||||
```
|
||||
|
||||
### Choice Block
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
choice **the severity level**:
|
||||
option "Critical":
|
||||
session "Escalate immediately"
|
||||
option "Minor":
|
||||
session "Log for later"
|
||||
```
|
||||
|
||||
```prose
|
||||
# Homeric
|
||||
crossroads **the severity level**:
|
||||
path "Critical":
|
||||
trial "Escalate immediately"
|
||||
path "Minor":
|
||||
trial "Log for later"
|
||||
```
|
||||
|
||||
### Conditionals
|
||||
|
||||
```prose
|
||||
# Functional
|
||||
if **has security issues**:
|
||||
session "Fix security"
|
||||
elif **has performance issues**:
|
||||
session "Optimize"
|
||||
else:
|
||||
session "Approve"
|
||||
```
|
||||
- `session` sample -> `trial`
|
||||
- `parallel` sample -> `host`
|
||||
- `loop` sample -> `ordeal`
|
||||
- `try/catch/finally` sample -> `venture` / `should ruin come` / `in the end`
|
||||
- `choice` sample -> `crossroads` / `path`
|
||||
|
||||
```prose
|
||||
# Homeric
|
||||
|
||||
35
extensions/open-prose/skills/prose/alts/shared-appendix.md
Normal file
35
extensions/open-prose/skills/prose/alts/shared-appendix.md
Normal file
@ -0,0 +1,35 @@
|
||||
---
|
||||
role: reference
|
||||
summary: Shared appendix for experimental OpenProse alternate registers.
|
||||
status: draft
|
||||
requires: prose.md
|
||||
---
|
||||
|
||||
# OpenProse Alternate Register Appendix
|
||||
|
||||
Use this appendix with experimental register files such as `arabian-nights.md` and `homer.md`.
|
||||
|
||||
## Unchanged keywords
|
||||
|
||||
These keywords already work or are too functional to replace sensibly:
|
||||
|
||||
- `**...**` discretion markers
|
||||
- `until`, `while`
|
||||
- `map`, `filter`, `reduce`, `pmap`
|
||||
- `max`
|
||||
- `as`
|
||||
- model names such as `sonnet`, `opus`, and `haiku`
|
||||
|
||||
## Comparison pattern
|
||||
|
||||
Use the translation map in each register file to rewrite the same functional sample programs:
|
||||
|
||||
- simple program
|
||||
- parallel execution
|
||||
- loop with condition
|
||||
- error handling
|
||||
- choice block
|
||||
- conditionals
|
||||
|
||||
The goal is consistency, not one canonical wording.
|
||||
Keep the functional version intact and rewrite only the register-specific aliases.
|
||||
@ -87,71 +87,28 @@ The `agents` and `agent_segments` tables for project-scoped agents live in `.pro
|
||||
|
||||
## Responsibility Separation
|
||||
|
||||
This section defines **who does what**. This is the contract between the VM and subagents.
|
||||
The VM/subagent contract matches [postgres.md](./postgres.md#responsibility-separation).
|
||||
|
||||
### VM Responsibilities
|
||||
SQLite-specific differences:
|
||||
|
||||
The VM (the orchestrating agent running the .prose program) is responsible for:
|
||||
- the VM creates `state.db` instead of an `openprose` schema
|
||||
- subagent confirmation messages point at a local database path, for example `.prose/runs/<runId>/state.db`
|
||||
- cleanup is typically `VACUUM` or file deletion rather than dropping schema objects
|
||||
|
||||
| Responsibility | Description |
|
||||
| ------------------------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| **Database creation** | Create `state.db` and initialize core tables at run start |
|
||||
| **Program registration** | Store the program source and metadata |
|
||||
| **Execution tracking** | Update position, status, and timing as statements execute |
|
||||
| **Subagent spawning** | Spawn sessions via Task tool with database path and instructions |
|
||||
| **Parallel coordination** | Track branch status, implement join strategies |
|
||||
| **Loop management** | Track iteration counts, evaluate conditions |
|
||||
| **Error aggregation** | Record failures, manage retry state |
|
||||
| **Context preservation** | Maintain sufficient narration in the main conversation thread so execution can be understood and resumed |
|
||||
| **Completion detection** | Mark the run as complete when finished |
|
||||
Example return values:
|
||||
|
||||
**Critical:** The VM must preserve enough context in its own conversation to understand execution state without re-reading the entire database. The database is for coordination and persistence, not a replacement for working memory.
|
||||
|
||||
### Subagent Responsibilities
|
||||
|
||||
Subagents (sessions spawned by the VM) are responsible for:
|
||||
|
||||
| Responsibility | Description |
|
||||
| ----------------------- | ----------------------------------------------------------------- |
|
||||
| **Writing own outputs** | Insert/update their binding in the `bindings` table |
|
||||
| **Memory management** | For persistent agents: read and update their memory record |
|
||||
| **Segment recording** | For persistent agents: append segment history |
|
||||
| **Attachment handling** | Write large outputs to `attachments/` directory, store path in DB |
|
||||
| **Atomic writes** | Use transactions when updating multiple related records |
|
||||
|
||||
**Critical:** Subagents write ONLY to `bindings`, `agents`, and `agent_segments` tables. The VM owns the `execution` table entirely. Completion signaling happens through the substrate (Task tool return), not database updates.
|
||||
|
||||
**Critical:** Subagents must write their outputs directly to the database. The VM does not write subagent outputs—it only reads them after the subagent completes.
|
||||
|
||||
**What subagents return to the VM:** A confirmation message with the binding location—not the full content:
|
||||
|
||||
**Root scope:**
|
||||
|
||||
```
|
||||
```text
|
||||
Binding written: research
|
||||
Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='research', execution_id=NULL)
|
||||
Summary: AI safety research covering alignment, robustness, and interpretability with 15 citations.
|
||||
```
|
||||
|
||||
**Inside block invocation:**
|
||||
|
||||
```
|
||||
```text
|
||||
Binding written: result
|
||||
Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='result', execution_id=43)
|
||||
Execution ID: 43
|
||||
Summary: Processed chunk into 3 sub-parts for recursive processing.
|
||||
```
|
||||
|
||||
The VM tracks locations, not values. This keeps the VM's context lean and enables arbitrarily large intermediate values.
|
||||
|
||||
### Shared Concerns
|
||||
|
||||
| Concern | Who Handles |
|
||||
| ---------------- | ------------------------------------------------------------------ |
|
||||
| Schema evolution | Either (use `CREATE TABLE IF NOT EXISTS`, `ALTER TABLE` as needed) |
|
||||
| Custom tables | Either (prefix with `x_` for extensions) |
|
||||
| Indexing | Either (add indexes for frequently-queried columns) |
|
||||
| Cleanup | VM (at run end, optionally vacuum) |
|
||||
The VM still tracks locations, not full values.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@ -137,6 +137,46 @@ describe("slackPlugin outbound", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("slackPlugin agentPrompt", () => {
|
||||
it("tells agents interactive replies are disabled by default", () => {
|
||||
const hints = slackPlugin.agentPrompt?.messageToolHints?.({
|
||||
cfg: {
|
||||
channels: {
|
||||
slack: {
|
||||
botToken: "xoxb-test",
|
||||
appToken: "xapp-test",
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(hints).toEqual([
|
||||
"- Slack interactive replies are disabled. If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts.<account>.capabilities`).",
|
||||
]);
|
||||
});
|
||||
|
||||
it("shows Slack interactive reply directives when enabled", () => {
|
||||
const hints = slackPlugin.agentPrompt?.messageToolHints?.({
|
||||
cfg: {
|
||||
channels: {
|
||||
slack: {
|
||||
botToken: "xoxb-test",
|
||||
appToken: "xapp-test",
|
||||
capabilities: { interactiveReplies: true },
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(hints).toContain(
|
||||
"- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.",
|
||||
);
|
||||
expect(hints).toContain(
|
||||
"- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("slackPlugin config", () => {
|
||||
it("treats HTTP mode accounts with bot token + signing secret as configured", async () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
|
||||
@ -29,6 +29,7 @@ import {
|
||||
resolveDefaultSlackAccountId,
|
||||
resolveSlackAccount,
|
||||
resolveSlackReplyToMode,
|
||||
isSlackInteractiveRepliesEnabled,
|
||||
resolveSlackGroupRequireMention,
|
||||
resolveSlackGroupToolPolicy,
|
||||
buildSlackThreadingToolContext,
|
||||
@ -146,6 +147,17 @@ export const slackPlugin: ChannelPlugin<ResolvedSlackAccount> = {
|
||||
media: true,
|
||||
nativeCommands: true,
|
||||
},
|
||||
agentPrompt: {
|
||||
messageToolHints: ({ cfg, accountId }) =>
|
||||
isSlackInteractiveRepliesEnabled({ cfg, accountId })
|
||||
? [
|
||||
"- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.",
|
||||
"- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.",
|
||||
]
|
||||
: [
|
||||
"- Slack interactive replies are disabled. If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts.<account>.capabilities`).",
|
||||
],
|
||||
},
|
||||
streaming: {
|
||||
blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 },
|
||||
},
|
||||
|
||||
@ -162,41 +162,55 @@ export function isGroupInviteAllowed(
|
||||
}
|
||||
|
||||
// Helper to recursively extract text from inline content
|
||||
function renderInlineItem(
|
||||
item: any,
|
||||
options?: {
|
||||
linkMode?: "content-or-href" | "href";
|
||||
allowBreak?: boolean;
|
||||
allowBlockquote?: boolean;
|
||||
},
|
||||
): string {
|
||||
if (typeof item === "string") {
|
||||
return item;
|
||||
}
|
||||
if (!item || typeof item !== "object") {
|
||||
return "";
|
||||
}
|
||||
if (item.ship) {
|
||||
return item.ship;
|
||||
}
|
||||
if ("sect" in item) {
|
||||
return `@${item.sect || "all"}`;
|
||||
}
|
||||
if (options?.allowBreak && item.break !== undefined) {
|
||||
return "\n";
|
||||
}
|
||||
if (item["inline-code"]) {
|
||||
return `\`${item["inline-code"]}\``;
|
||||
}
|
||||
if (item.code) {
|
||||
return `\`${item.code}\``;
|
||||
}
|
||||
if (item.link && item.link.href) {
|
||||
return options?.linkMode === "href" ? item.link.href : item.link.content || item.link.href;
|
||||
}
|
||||
if (item.bold && Array.isArray(item.bold)) {
|
||||
return `**${extractInlineText(item.bold)}**`;
|
||||
}
|
||||
if (item.italics && Array.isArray(item.italics)) {
|
||||
return `*${extractInlineText(item.italics)}*`;
|
||||
}
|
||||
if (item.strike && Array.isArray(item.strike)) {
|
||||
return `~~${extractInlineText(item.strike)}~~`;
|
||||
}
|
||||
if (options?.allowBlockquote && item.blockquote && Array.isArray(item.blockquote)) {
|
||||
return `> ${extractInlineText(item.blockquote)}`;
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
function extractInlineText(items: any[]): string {
|
||||
return items
|
||||
.map((item: any) => {
|
||||
if (typeof item === "string") {
|
||||
return item;
|
||||
}
|
||||
if (item && typeof item === "object") {
|
||||
if (item.ship) {
|
||||
return item.ship;
|
||||
}
|
||||
if ("sect" in item) {
|
||||
return `@${item.sect || "all"}`;
|
||||
}
|
||||
if (item["inline-code"]) {
|
||||
return `\`${item["inline-code"]}\``;
|
||||
}
|
||||
if (item.code) {
|
||||
return `\`${item.code}\``;
|
||||
}
|
||||
if (item.link && item.link.href) {
|
||||
return item.link.content || item.link.href;
|
||||
}
|
||||
if (item.bold && Array.isArray(item.bold)) {
|
||||
return `**${extractInlineText(item.bold)}**`;
|
||||
}
|
||||
if (item.italics && Array.isArray(item.italics)) {
|
||||
return `*${extractInlineText(item.italics)}*`;
|
||||
}
|
||||
if (item.strike && Array.isArray(item.strike)) {
|
||||
return `~~${extractInlineText(item.strike)}~~`;
|
||||
}
|
||||
}
|
||||
return "";
|
||||
})
|
||||
.join("");
|
||||
return items.map((item: any) => renderInlineItem(item)).join("");
|
||||
}
|
||||
|
||||
export function extractMessageText(content: unknown): string {
|
||||
@ -209,48 +223,13 @@ export function extractMessageText(content: unknown): string {
|
||||
// Handle inline content (text, ships, links, etc.)
|
||||
if (verse.inline && Array.isArray(verse.inline)) {
|
||||
return verse.inline
|
||||
.map((item: any) => {
|
||||
if (typeof item === "string") {
|
||||
return item;
|
||||
}
|
||||
if (item && typeof item === "object") {
|
||||
if (item.ship) {
|
||||
return item.ship;
|
||||
}
|
||||
// Handle sect (role mentions like @all)
|
||||
if ("sect" in item) {
|
||||
return `@${item.sect || "all"}`;
|
||||
}
|
||||
if (item.break !== undefined) {
|
||||
return "\n";
|
||||
}
|
||||
if (item.link && item.link.href) {
|
||||
return item.link.href;
|
||||
}
|
||||
// Handle inline code (Tlon uses "inline-code" key)
|
||||
if (item["inline-code"]) {
|
||||
return `\`${item["inline-code"]}\``;
|
||||
}
|
||||
if (item.code) {
|
||||
return `\`${item.code}\``;
|
||||
}
|
||||
// Handle bold/italic/strike - recursively extract text
|
||||
if (item.bold && Array.isArray(item.bold)) {
|
||||
return `**${extractInlineText(item.bold)}**`;
|
||||
}
|
||||
if (item.italics && Array.isArray(item.italics)) {
|
||||
return `*${extractInlineText(item.italics)}*`;
|
||||
}
|
||||
if (item.strike && Array.isArray(item.strike)) {
|
||||
return `~~${extractInlineText(item.strike)}~~`;
|
||||
}
|
||||
// Handle blockquote inline
|
||||
if (item.blockquote && Array.isArray(item.blockquote)) {
|
||||
return `> ${extractInlineText(item.blockquote)}`;
|
||||
}
|
||||
}
|
||||
return "";
|
||||
})
|
||||
.map((item: any) =>
|
||||
renderInlineItem(item, {
|
||||
linkMode: "href",
|
||||
allowBreak: true,
|
||||
allowBlockquote: true,
|
||||
}),
|
||||
)
|
||||
.join("");
|
||||
}
|
||||
|
||||
|
||||
@ -12,6 +12,29 @@ export type UrbitChannelDeps = {
|
||||
fetchImpl?: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>;
|
||||
};
|
||||
|
||||
async function putUrbitChannel(
|
||||
deps: UrbitChannelDeps,
|
||||
params: { body: unknown; auditContext: string },
|
||||
) {
|
||||
return await urbitFetch({
|
||||
baseUrl: deps.baseUrl,
|
||||
path: `/~/channel/${deps.channelId}`,
|
||||
init: {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
Cookie: deps.cookie,
|
||||
},
|
||||
body: JSON.stringify(params.body),
|
||||
},
|
||||
ssrfPolicy: deps.ssrfPolicy,
|
||||
lookupFn: deps.lookupFn,
|
||||
fetchImpl: deps.fetchImpl,
|
||||
timeoutMs: 30_000,
|
||||
auditContext: params.auditContext,
|
||||
});
|
||||
}
|
||||
|
||||
export async function pokeUrbitChannel(
|
||||
deps: UrbitChannelDeps,
|
||||
params: { app: string; mark: string; json: unknown; auditContext: string },
|
||||
@ -26,21 +49,8 @@ export async function pokeUrbitChannel(
|
||||
json: params.json,
|
||||
};
|
||||
|
||||
const { response, release } = await urbitFetch({
|
||||
baseUrl: deps.baseUrl,
|
||||
path: `/~/channel/${deps.channelId}`,
|
||||
init: {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
Cookie: deps.cookie,
|
||||
},
|
||||
body: JSON.stringify([pokeData]),
|
||||
},
|
||||
ssrfPolicy: deps.ssrfPolicy,
|
||||
lookupFn: deps.lookupFn,
|
||||
fetchImpl: deps.fetchImpl,
|
||||
timeoutMs: 30_000,
|
||||
const { response, release } = await putUrbitChannel(deps, {
|
||||
body: [pokeData],
|
||||
auditContext: params.auditContext,
|
||||
});
|
||||
|
||||
@ -88,23 +98,7 @@ export async function createUrbitChannel(
|
||||
deps: UrbitChannelDeps,
|
||||
params: { body: unknown; auditContext: string },
|
||||
): Promise<void> {
|
||||
const { response, release } = await urbitFetch({
|
||||
baseUrl: deps.baseUrl,
|
||||
path: `/~/channel/${deps.channelId}`,
|
||||
init: {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
Cookie: deps.cookie,
|
||||
},
|
||||
body: JSON.stringify(params.body),
|
||||
},
|
||||
ssrfPolicy: deps.ssrfPolicy,
|
||||
lookupFn: deps.lookupFn,
|
||||
fetchImpl: deps.fetchImpl,
|
||||
timeoutMs: 30_000,
|
||||
auditContext: params.auditContext,
|
||||
});
|
||||
const { response, release } = await putUrbitChannel(deps, params);
|
||||
|
||||
try {
|
||||
if (!response.ok && response.status !== 204) {
|
||||
@ -116,30 +110,17 @@ export async function createUrbitChannel(
|
||||
}
|
||||
|
||||
export async function wakeUrbitChannel(deps: UrbitChannelDeps): Promise<void> {
|
||||
const { response, release } = await urbitFetch({
|
||||
baseUrl: deps.baseUrl,
|
||||
path: `/~/channel/${deps.channelId}`,
|
||||
init: {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
Cookie: deps.cookie,
|
||||
const { response, release } = await putUrbitChannel(deps, {
|
||||
body: [
|
||||
{
|
||||
id: Date.now(),
|
||||
action: "poke",
|
||||
ship: deps.ship,
|
||||
app: "hood",
|
||||
mark: "helm-hi",
|
||||
json: "Opening API channel",
|
||||
},
|
||||
body: JSON.stringify([
|
||||
{
|
||||
id: Date.now(),
|
||||
action: "poke",
|
||||
ship: deps.ship,
|
||||
app: "hood",
|
||||
mark: "helm-hi",
|
||||
json: "Opening API channel",
|
||||
},
|
||||
]),
|
||||
},
|
||||
ssrfPolicy: deps.ssrfPolicy,
|
||||
lookupFn: deps.lookupFn,
|
||||
fetchImpl: deps.fetchImpl,
|
||||
timeoutMs: 30_000,
|
||||
],
|
||||
auditContext: "tlon-urbit-channel-wake",
|
||||
});
|
||||
|
||||
|
||||
@ -15,6 +15,57 @@ vi.mock("@tloncorp/api", () => ({
|
||||
}));
|
||||
|
||||
describe("uploadImageFromUrl", () => {
|
||||
async function loadUploadMocks() {
|
||||
const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon");
|
||||
const { uploadFile } = await import("@tloncorp/api");
|
||||
const { uploadImageFromUrl } = await import("./upload.js");
|
||||
return {
|
||||
mockFetch: vi.mocked(fetchWithSsrFGuard),
|
||||
mockUploadFile: vi.mocked(uploadFile),
|
||||
uploadImageFromUrl,
|
||||
};
|
||||
}
|
||||
|
||||
type UploadMocks = Awaited<ReturnType<typeof loadUploadMocks>>;
|
||||
|
||||
function mockSuccessfulFetch(params: {
|
||||
mockFetch: UploadMocks["mockFetch"];
|
||||
blob: Blob;
|
||||
finalUrl: string;
|
||||
contentType: string;
|
||||
}) {
|
||||
params.mockFetch.mockResolvedValue({
|
||||
response: {
|
||||
ok: true,
|
||||
headers: new Headers({ "content-type": params.contentType }),
|
||||
blob: () => Promise.resolve(params.blob),
|
||||
} as unknown as Response,
|
||||
finalUrl: params.finalUrl,
|
||||
release: vi.fn().mockResolvedValue(undefined),
|
||||
});
|
||||
}
|
||||
|
||||
async function setupSuccessfulUpload(params?: {
|
||||
sourceUrl?: string;
|
||||
contentType?: string;
|
||||
uploadedUrl?: string;
|
||||
}) {
|
||||
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
|
||||
const sourceUrl = params?.sourceUrl ?? "https://example.com/image.png";
|
||||
const contentType = params?.contentType ?? "image/png";
|
||||
const mockBlob = new Blob(["fake-image"], { type: contentType });
|
||||
mockSuccessfulFetch({
|
||||
mockFetch,
|
||||
blob: mockBlob,
|
||||
finalUrl: sourceUrl,
|
||||
contentType,
|
||||
});
|
||||
if (params?.uploadedUrl) {
|
||||
mockUploadFile.mockResolvedValue({ url: params.uploadedUrl });
|
||||
}
|
||||
return { mockBlob, mockUploadFile, uploadImageFromUrl };
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
@ -24,28 +75,10 @@ describe("uploadImageFromUrl", () => {
|
||||
});
|
||||
|
||||
it("fetches image and calls uploadFile, returns uploaded URL", async () => {
|
||||
const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon");
|
||||
const mockFetch = vi.mocked(fetchWithSsrFGuard);
|
||||
|
||||
const { uploadFile } = await import("@tloncorp/api");
|
||||
const mockUploadFile = vi.mocked(uploadFile);
|
||||
|
||||
// Mock fetchWithSsrFGuard to return a successful response with a blob
|
||||
const mockBlob = new Blob(["fake-image"], { type: "image/png" });
|
||||
mockFetch.mockResolvedValue({
|
||||
response: {
|
||||
ok: true,
|
||||
headers: new Headers({ "content-type": "image/png" }),
|
||||
blob: () => Promise.resolve(mockBlob),
|
||||
} as unknown as Response,
|
||||
finalUrl: "https://example.com/image.png",
|
||||
release: vi.fn().mockResolvedValue(undefined),
|
||||
const { mockBlob, mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload({
|
||||
uploadedUrl: "https://memex.tlon.network/uploaded.png",
|
||||
});
|
||||
|
||||
// Mock uploadFile to return a successful upload
|
||||
mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" });
|
||||
|
||||
const { uploadImageFromUrl } = await import("./upload.js");
|
||||
const result = await uploadImageFromUrl("https://example.com/image.png");
|
||||
|
||||
expect(result).toBe("https://memex.tlon.network/uploaded.png");
|
||||
@ -59,10 +92,8 @@ describe("uploadImageFromUrl", () => {
|
||||
});
|
||||
|
||||
it("returns original URL if fetch fails", async () => {
|
||||
const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon");
|
||||
const mockFetch = vi.mocked(fetchWithSsrFGuard);
|
||||
const { mockFetch, uploadImageFromUrl } = await loadUploadMocks();
|
||||
|
||||
// Mock fetchWithSsrFGuard to return a failed response
|
||||
mockFetch.mockResolvedValue({
|
||||
response: {
|
||||
ok: false,
|
||||
@ -72,35 +103,15 @@ describe("uploadImageFromUrl", () => {
|
||||
release: vi.fn().mockResolvedValue(undefined),
|
||||
});
|
||||
|
||||
const { uploadImageFromUrl } = await import("./upload.js");
|
||||
const result = await uploadImageFromUrl("https://example.com/image.png");
|
||||
|
||||
expect(result).toBe("https://example.com/image.png");
|
||||
});
|
||||
|
||||
it("returns original URL if upload fails", async () => {
|
||||
const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon");
|
||||
const mockFetch = vi.mocked(fetchWithSsrFGuard);
|
||||
|
||||
const { uploadFile } = await import("@tloncorp/api");
|
||||
const mockUploadFile = vi.mocked(uploadFile);
|
||||
|
||||
// Mock fetchWithSsrFGuard to return a successful response
|
||||
const mockBlob = new Blob(["fake-image"], { type: "image/png" });
|
||||
mockFetch.mockResolvedValue({
|
||||
response: {
|
||||
ok: true,
|
||||
headers: new Headers({ "content-type": "image/png" }),
|
||||
blob: () => Promise.resolve(mockBlob),
|
||||
} as unknown as Response,
|
||||
finalUrl: "https://example.com/image.png",
|
||||
release: vi.fn().mockResolvedValue(undefined),
|
||||
});
|
||||
|
||||
// Mock uploadFile to throw an error
|
||||
const { mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload();
|
||||
mockUploadFile.mockRejectedValue(new Error("Upload failed"));
|
||||
|
||||
const { uploadImageFromUrl } = await import("./upload.js");
|
||||
const result = await uploadImageFromUrl("https://example.com/image.png");
|
||||
|
||||
expect(result).toBe("https://example.com/image.png");
|
||||
@ -127,26 +138,18 @@ describe("uploadImageFromUrl", () => {
|
||||
});
|
||||
|
||||
it("extracts filename from URL path", async () => {
|
||||
const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon");
|
||||
const mockFetch = vi.mocked(fetchWithSsrFGuard);
|
||||
|
||||
const { uploadFile } = await import("@tloncorp/api");
|
||||
const mockUploadFile = vi.mocked(uploadFile);
|
||||
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
|
||||
|
||||
const mockBlob = new Blob(["fake-image"], { type: "image/jpeg" });
|
||||
mockFetch.mockResolvedValue({
|
||||
response: {
|
||||
ok: true,
|
||||
headers: new Headers({ "content-type": "image/jpeg" }),
|
||||
blob: () => Promise.resolve(mockBlob),
|
||||
} as unknown as Response,
|
||||
mockSuccessfulFetch({
|
||||
mockFetch,
|
||||
blob: mockBlob,
|
||||
finalUrl: "https://example.com/path/to/my-image.jpg",
|
||||
release: vi.fn().mockResolvedValue(undefined),
|
||||
contentType: "image/jpeg",
|
||||
});
|
||||
|
||||
mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.jpg" });
|
||||
|
||||
const { uploadImageFromUrl } = await import("./upload.js");
|
||||
await uploadImageFromUrl("https://example.com/path/to/my-image.jpg");
|
||||
|
||||
expect(mockUploadFile).toHaveBeenCalledWith(
|
||||
@ -157,26 +160,18 @@ describe("uploadImageFromUrl", () => {
|
||||
});
|
||||
|
||||
it("uses default filename when URL has no path", async () => {
|
||||
const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon");
|
||||
const mockFetch = vi.mocked(fetchWithSsrFGuard);
|
||||
|
||||
const { uploadFile } = await import("@tloncorp/api");
|
||||
const mockUploadFile = vi.mocked(uploadFile);
|
||||
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
|
||||
|
||||
const mockBlob = new Blob(["fake-image"], { type: "image/png" });
|
||||
mockFetch.mockResolvedValue({
|
||||
response: {
|
||||
ok: true,
|
||||
headers: new Headers({ "content-type": "image/png" }),
|
||||
blob: () => Promise.resolve(mockBlob),
|
||||
} as unknown as Response,
|
||||
mockSuccessfulFetch({
|
||||
mockFetch,
|
||||
blob: mockBlob,
|
||||
finalUrl: "https://example.com/",
|
||||
release: vi.fn().mockResolvedValue(undefined),
|
||||
contentType: "image/png",
|
||||
});
|
||||
|
||||
mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" });
|
||||
|
||||
const { uploadImageFromUrl } = await import("./upload.js");
|
||||
await uploadImageFromUrl("https://example.com/");
|
||||
|
||||
expect(mockUploadFile).toHaveBeenCalledWith(
|
||||
|
||||
@ -89,56 +89,18 @@ Notes:
|
||||
- Twilio/Telnyx/Plivo require a **publicly reachable** webhook URL.
|
||||
- `mock` is a local dev provider (no network calls).
|
||||
- Telnyx requires `telnyx.publicKey` (or `TELNYX_PUBLIC_KEY`) unless `skipSignatureVerification` is true.
|
||||
- `tunnel.allowNgrokFreeTierLoopbackBypass: true` allows Twilio webhooks with invalid signatures **only** when `tunnel.provider="ngrok"` and `serve.bind` is loopback (ngrok local agent). Use for local dev only.
|
||||
|
||||
Streaming security defaults:
|
||||
|
||||
- `streaming.preStartTimeoutMs` closes sockets that never send a valid `start` frame.
|
||||
- `streaming.maxPendingConnections` caps total unauthenticated pre-start sockets.
|
||||
- `streaming.maxPendingConnectionsPerIp` caps unauthenticated pre-start sockets per source IP.
|
||||
- `streaming.maxConnections` caps total open media stream sockets (pending + active).
|
||||
- advanced webhook, streaming, and tunnel notes: `https://docs.openclaw.ai/plugins/voice-call`
|
||||
|
||||
## Stale call reaper
|
||||
|
||||
Use `staleCallReaperSeconds` to end calls that never receive a terminal webhook
|
||||
(for example, notify-mode calls that never complete). The default is `0`
|
||||
(disabled).
|
||||
|
||||
Recommended ranges:
|
||||
|
||||
- **Production:** `120`–`300` seconds for notify-style flows.
|
||||
- Keep this value **higher than `maxDurationSeconds`** so normal calls can
|
||||
finish. A good starting point is `maxDurationSeconds + 30–60` seconds.
|
||||
|
||||
Example:
|
||||
|
||||
```json5
|
||||
{
|
||||
staleCallReaperSeconds: 360,
|
||||
}
|
||||
```
|
||||
See the plugin docs for recommended ranges and production examples:
|
||||
`https://docs.openclaw.ai/plugins/voice-call#stale-call-reaper`
|
||||
|
||||
## TTS for calls
|
||||
|
||||
Voice Call uses the core `messages.tts` configuration (OpenAI or ElevenLabs) for
|
||||
streaming speech on calls. You can override it under the plugin config with the
|
||||
same shape — overrides deep-merge with `messages.tts`.
|
||||
|
||||
```json5
|
||||
{
|
||||
tts: {
|
||||
provider: "openai",
|
||||
openai: {
|
||||
voice: "alloy",
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- Edge TTS is ignored for voice calls (telephony audio needs PCM; Edge output is unreliable).
|
||||
- Core TTS is used when Twilio media streaming is enabled; otherwise calls fall back to provider native voices.
|
||||
streaming speech on calls. Override examples and provider caveats live here:
|
||||
`https://docs.openclaw.ai/plugins/voice-call#tts-for-calls`
|
||||
|
||||
## CLI
|
||||
|
||||
|
||||
@ -9,121 +9,87 @@ import {
|
||||
} from "./manager.test-harness.js";
|
||||
|
||||
describe("CallManager verification on restore", () => {
|
||||
it("skips stale calls reported terminal by provider", async () => {
|
||||
async function initializeManager(params?: {
|
||||
callOverrides?: Parameters<typeof makePersistedCall>[0];
|
||||
providerResult?: FakeProvider["getCallStatusResult"];
|
||||
configureProvider?: (provider: FakeProvider) => void;
|
||||
configOverrides?: Partial<{ maxDurationSeconds: number }>;
|
||||
}) {
|
||||
const storePath = createTestStorePath();
|
||||
const call = makePersistedCall();
|
||||
const call = makePersistedCall(params?.callOverrides);
|
||||
writeCallsToStore(storePath, [call]);
|
||||
|
||||
const provider = new FakeProvider();
|
||||
provider.getCallStatusResult = { status: "completed", isTerminal: true };
|
||||
if (params?.providerResult) {
|
||||
provider.getCallStatusResult = params.providerResult;
|
||||
}
|
||||
params?.configureProvider?.(provider);
|
||||
|
||||
const config = VoiceCallConfigSchema.parse({
|
||||
enabled: true,
|
||||
provider: "plivo",
|
||||
fromNumber: "+15550000000",
|
||||
...params?.configOverrides,
|
||||
});
|
||||
const manager = new CallManager(config, storePath);
|
||||
await manager.initialize(provider, "https://example.com/voice/webhook");
|
||||
|
||||
return { call, manager };
|
||||
}
|
||||
|
||||
it("skips stale calls reported terminal by provider", async () => {
|
||||
const { manager } = await initializeManager({
|
||||
providerResult: { status: "completed", isTerminal: true },
|
||||
});
|
||||
|
||||
expect(manager.getActiveCalls()).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("keeps calls reported active by provider", async () => {
|
||||
const storePath = createTestStorePath();
|
||||
const call = makePersistedCall();
|
||||
writeCallsToStore(storePath, [call]);
|
||||
|
||||
const provider = new FakeProvider();
|
||||
provider.getCallStatusResult = { status: "in-progress", isTerminal: false };
|
||||
|
||||
const config = VoiceCallConfigSchema.parse({
|
||||
enabled: true,
|
||||
provider: "plivo",
|
||||
fromNumber: "+15550000000",
|
||||
const { call, manager } = await initializeManager({
|
||||
providerResult: { status: "in-progress", isTerminal: false },
|
||||
});
|
||||
const manager = new CallManager(config, storePath);
|
||||
await manager.initialize(provider, "https://example.com/voice/webhook");
|
||||
|
||||
expect(manager.getActiveCalls()).toHaveLength(1);
|
||||
expect(manager.getActiveCalls()[0]?.callId).toBe(call.callId);
|
||||
});
|
||||
|
||||
it("keeps calls when provider returns unknown (transient error)", async () => {
|
||||
const storePath = createTestStorePath();
|
||||
const call = makePersistedCall();
|
||||
writeCallsToStore(storePath, [call]);
|
||||
|
||||
const provider = new FakeProvider();
|
||||
provider.getCallStatusResult = { status: "error", isTerminal: false, isUnknown: true };
|
||||
|
||||
const config = VoiceCallConfigSchema.parse({
|
||||
enabled: true,
|
||||
provider: "plivo",
|
||||
fromNumber: "+15550000000",
|
||||
const { manager } = await initializeManager({
|
||||
providerResult: { status: "error", isTerminal: false, isUnknown: true },
|
||||
});
|
||||
const manager = new CallManager(config, storePath);
|
||||
await manager.initialize(provider, "https://example.com/voice/webhook");
|
||||
|
||||
expect(manager.getActiveCalls()).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("skips calls older than maxDurationSeconds", async () => {
|
||||
const storePath = createTestStorePath();
|
||||
const call = makePersistedCall({
|
||||
startedAt: Date.now() - 600_000,
|
||||
answeredAt: Date.now() - 590_000,
|
||||
const { manager } = await initializeManager({
|
||||
callOverrides: {
|
||||
startedAt: Date.now() - 600_000,
|
||||
answeredAt: Date.now() - 590_000,
|
||||
},
|
||||
configOverrides: { maxDurationSeconds: 300 },
|
||||
});
|
||||
writeCallsToStore(storePath, [call]);
|
||||
|
||||
const provider = new FakeProvider();
|
||||
|
||||
const config = VoiceCallConfigSchema.parse({
|
||||
enabled: true,
|
||||
provider: "plivo",
|
||||
fromNumber: "+15550000000",
|
||||
maxDurationSeconds: 300,
|
||||
});
|
||||
const manager = new CallManager(config, storePath);
|
||||
await manager.initialize(provider, "https://example.com/voice/webhook");
|
||||
|
||||
expect(manager.getActiveCalls()).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("skips calls without providerCallId", async () => {
|
||||
const storePath = createTestStorePath();
|
||||
const call = makePersistedCall({ providerCallId: undefined, state: "initiated" });
|
||||
writeCallsToStore(storePath, [call]);
|
||||
|
||||
const provider = new FakeProvider();
|
||||
|
||||
const config = VoiceCallConfigSchema.parse({
|
||||
enabled: true,
|
||||
provider: "plivo",
|
||||
fromNumber: "+15550000000",
|
||||
const { manager } = await initializeManager({
|
||||
callOverrides: { providerCallId: undefined, state: "initiated" },
|
||||
});
|
||||
const manager = new CallManager(config, storePath);
|
||||
await manager.initialize(provider, "https://example.com/voice/webhook");
|
||||
|
||||
expect(manager.getActiveCalls()).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("keeps call when getCallStatus throws (verification failure)", async () => {
|
||||
const storePath = createTestStorePath();
|
||||
const call = makePersistedCall();
|
||||
writeCallsToStore(storePath, [call]);
|
||||
|
||||
const provider = new FakeProvider();
|
||||
provider.getCallStatus = async () => {
|
||||
throw new Error("network failure");
|
||||
};
|
||||
|
||||
const config = VoiceCallConfigSchema.parse({
|
||||
enabled: true,
|
||||
provider: "plivo",
|
||||
fromNumber: "+15550000000",
|
||||
const { manager } = await initializeManager({
|
||||
configureProvider: (provider) => {
|
||||
provider.getCallStatus = async () => {
|
||||
throw new Error("network failure");
|
||||
};
|
||||
},
|
||||
});
|
||||
const manager = new CallManager(config, storePath);
|
||||
await manager.initialize(provider, "https://example.com/voice/webhook");
|
||||
|
||||
expect(manager.getActiveCalls()).toHaveLength(1);
|
||||
});
|
||||
|
||||
@ -21,6 +21,12 @@ function createContext(rawBody: string, query?: WebhookContext["query"]): Webhoo
|
||||
};
|
||||
}
|
||||
|
||||
function expectStreamingTwiml(body: string) {
|
||||
expect(body).toContain(STREAM_URL);
|
||||
expect(body).toContain('<Parameter name="token" value="');
|
||||
expect(body).toContain("<Connect>");
|
||||
}
|
||||
|
||||
describe("TwilioProvider", () => {
|
||||
it("returns streaming TwiML for outbound conversation calls before in-progress", () => {
|
||||
const provider = createProvider();
|
||||
@ -30,9 +36,8 @@ describe("TwilioProvider", () => {
|
||||
|
||||
const result = provider.parseWebhookEvent(ctx);
|
||||
|
||||
expect(result.providerResponseBody).toContain(STREAM_URL);
|
||||
expect(result.providerResponseBody).toContain('<Parameter name="token" value="');
|
||||
expect(result.providerResponseBody).toContain("<Connect>");
|
||||
expect(result.providerResponseBody).toBeDefined();
|
||||
expectStreamingTwiml(result.providerResponseBody ?? "");
|
||||
});
|
||||
|
||||
it("returns empty TwiML for status callbacks", () => {
|
||||
@ -55,9 +60,8 @@ describe("TwilioProvider", () => {
|
||||
|
||||
const result = provider.parseWebhookEvent(ctx);
|
||||
|
||||
expect(result.providerResponseBody).toContain(STREAM_URL);
|
||||
expect(result.providerResponseBody).toContain('<Parameter name="token" value="');
|
||||
expect(result.providerResponseBody).toContain("<Connect>");
|
||||
expect(result.providerResponseBody).toBeDefined();
|
||||
expectStreamingTwiml(result.providerResponseBody ?? "");
|
||||
});
|
||||
|
||||
it("returns queue TwiML for second inbound call when first call is active", () => {
|
||||
|
||||
@ -32,6 +32,41 @@ async function waitForPollingLoopStart(): Promise<void> {
|
||||
await vi.waitFor(() => expect(getUpdatesMock).toHaveBeenCalledTimes(1));
|
||||
}
|
||||
|
||||
const TEST_ACCOUNT = {
|
||||
accountId: "default",
|
||||
config: {},
|
||||
} as unknown as ResolvedZaloAccount;
|
||||
|
||||
const TEST_CONFIG = {} as OpenClawConfig;
|
||||
|
||||
function createLifecycleRuntime() {
|
||||
return {
|
||||
log: vi.fn<(message: string) => void>(),
|
||||
error: vi.fn<(message: string) => void>(),
|
||||
};
|
||||
}
|
||||
|
||||
async function startLifecycleMonitor(
|
||||
options: {
|
||||
useWebhook?: boolean;
|
||||
webhookSecret?: string;
|
||||
webhookUrl?: string;
|
||||
} = {},
|
||||
) {
|
||||
const { monitorZaloProvider } = await import("./monitor.js");
|
||||
const abort = new AbortController();
|
||||
const runtime = createLifecycleRuntime();
|
||||
const run = monitorZaloProvider({
|
||||
token: "test-token",
|
||||
account: TEST_ACCOUNT,
|
||||
config: TEST_CONFIG,
|
||||
runtime,
|
||||
abortSignal: abort.signal,
|
||||
...options,
|
||||
});
|
||||
return { abort, runtime, run };
|
||||
}
|
||||
|
||||
describe("monitorZaloProvider lifecycle", () => {
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
@ -39,26 +74,9 @@ describe("monitorZaloProvider lifecycle", () => {
|
||||
});
|
||||
|
||||
it("stays alive in polling mode until abort", async () => {
|
||||
const { monitorZaloProvider } = await import("./monitor.js");
|
||||
const abort = new AbortController();
|
||||
const runtime = {
|
||||
log: vi.fn<(message: string) => void>(),
|
||||
error: vi.fn<(message: string) => void>(),
|
||||
};
|
||||
const account = {
|
||||
accountId: "default",
|
||||
config: {},
|
||||
} as unknown as ResolvedZaloAccount;
|
||||
const config = {} as OpenClawConfig;
|
||||
|
||||
let settled = false;
|
||||
const run = monitorZaloProvider({
|
||||
token: "test-token",
|
||||
account,
|
||||
config,
|
||||
runtime,
|
||||
abortSignal: abort.signal,
|
||||
}).then(() => {
|
||||
const { abort, runtime, run } = await startLifecycleMonitor();
|
||||
const monitoredRun = run.then(() => {
|
||||
settled = true;
|
||||
});
|
||||
|
||||
@ -70,7 +88,7 @@ describe("monitorZaloProvider lifecycle", () => {
|
||||
expect(settled).toBe(false);
|
||||
|
||||
abort.abort();
|
||||
await run;
|
||||
await monitoredRun;
|
||||
|
||||
expect(settled).toBe(true);
|
||||
expect(runtime.log).toHaveBeenCalledWith(
|
||||
@ -84,25 +102,7 @@ describe("monitorZaloProvider lifecycle", () => {
|
||||
result: { url: "https://example.com/hooks/zalo" },
|
||||
});
|
||||
|
||||
const { monitorZaloProvider } = await import("./monitor.js");
|
||||
const abort = new AbortController();
|
||||
const runtime = {
|
||||
log: vi.fn<(message: string) => void>(),
|
||||
error: vi.fn<(message: string) => void>(),
|
||||
};
|
||||
const account = {
|
||||
accountId: "default",
|
||||
config: {},
|
||||
} as unknown as ResolvedZaloAccount;
|
||||
const config = {} as OpenClawConfig;
|
||||
|
||||
const run = monitorZaloProvider({
|
||||
token: "test-token",
|
||||
account,
|
||||
config,
|
||||
runtime,
|
||||
abortSignal: abort.signal,
|
||||
});
|
||||
const { abort, runtime, run } = await startLifecycleMonitor();
|
||||
|
||||
await waitForPollingLoopStart();
|
||||
|
||||
@ -120,25 +120,7 @@ describe("monitorZaloProvider lifecycle", () => {
|
||||
const { ZaloApiError } = await import("./api.js");
|
||||
getWebhookInfoMock.mockRejectedValueOnce(new ZaloApiError("Not Found", 404, "Not Found"));
|
||||
|
||||
const { monitorZaloProvider } = await import("./monitor.js");
|
||||
const abort = new AbortController();
|
||||
const runtime = {
|
||||
log: vi.fn<(message: string) => void>(),
|
||||
error: vi.fn<(message: string) => void>(),
|
||||
};
|
||||
const account = {
|
||||
accountId: "default",
|
||||
config: {},
|
||||
} as unknown as ResolvedZaloAccount;
|
||||
const config = {} as OpenClawConfig;
|
||||
|
||||
const run = monitorZaloProvider({
|
||||
token: "test-token",
|
||||
account,
|
||||
config,
|
||||
runtime,
|
||||
abortSignal: abort.signal,
|
||||
});
|
||||
const { abort, runtime, run } = await startLifecycleMonitor();
|
||||
|
||||
await waitForPollingLoopStart();
|
||||
|
||||
@ -165,29 +147,13 @@ describe("monitorZaloProvider lifecycle", () => {
|
||||
}),
|
||||
);
|
||||
|
||||
const { monitorZaloProvider } = await import("./monitor.js");
|
||||
const abort = new AbortController();
|
||||
const runtime = {
|
||||
log: vi.fn<(message: string) => void>(),
|
||||
error: vi.fn<(message: string) => void>(),
|
||||
};
|
||||
const account = {
|
||||
accountId: "default",
|
||||
config: {},
|
||||
} as unknown as ResolvedZaloAccount;
|
||||
const config = {} as OpenClawConfig;
|
||||
|
||||
let settled = false;
|
||||
const run = monitorZaloProvider({
|
||||
token: "test-token",
|
||||
account,
|
||||
config,
|
||||
runtime,
|
||||
abortSignal: abort.signal,
|
||||
const { abort, runtime, run } = await startLifecycleMonitor({
|
||||
useWebhook: true,
|
||||
webhookUrl: "https://example.com/hooks/zalo",
|
||||
webhookSecret: "supersecret", // pragma: allowlist secret
|
||||
}).then(() => {
|
||||
});
|
||||
const monitoredRun = run.then(() => {
|
||||
settled = true;
|
||||
});
|
||||
|
||||
@ -202,7 +168,7 @@ describe("monitorZaloProvider lifecycle", () => {
|
||||
expect(registry.httpRoutes).toHaveLength(1);
|
||||
|
||||
resolveDeleteWebhook?.();
|
||||
await run;
|
||||
await monitoredRun;
|
||||
|
||||
expect(settled).toBe(true);
|
||||
expect(registry.httpRoutes).toHaveLength(0);
|
||||
|
||||
@ -187,6 +187,31 @@ function installRuntime(params: {
|
||||
};
|
||||
}
|
||||
|
||||
function installGroupCommandAuthRuntime() {
|
||||
return installRuntime({
|
||||
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
|
||||
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
|
||||
});
|
||||
}
|
||||
|
||||
async function processGroupControlCommand(params: {
|
||||
account: ResolvedZalouserAccount;
|
||||
content?: string;
|
||||
commandContent?: string;
|
||||
}) {
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
content: params.content ?? "/new",
|
||||
commandContent: params.commandContent ?? "/new",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
}),
|
||||
account: params.account,
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
});
|
||||
}
|
||||
|
||||
function createGroupMessage(overrides: Partial<ZaloInboundMessage> = {}): ZaloInboundMessage {
|
||||
return {
|
||||
threadId: "g-1",
|
||||
@ -229,57 +254,152 @@ describe("zalouser monitor group mention gating", () => {
|
||||
sendSeenZalouserMock.mockClear();
|
||||
});
|
||||
|
||||
it("skips unmentioned group messages when requireMention=true", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
async function processMessageWithDefaults(params: {
|
||||
message: ZaloInboundMessage;
|
||||
account?: ResolvedZalouserAccount;
|
||||
historyState?: {
|
||||
historyLimit: number;
|
||||
groupHistories: Map<
|
||||
string,
|
||||
Array<{ sender: string; body: string; timestamp?: number; messageId?: string }>
|
||||
>;
|
||||
};
|
||||
}) {
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage(),
|
||||
account: createAccount(),
|
||||
message: params.message,
|
||||
account: params.account ?? createAccount(),
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
historyState: params.historyState,
|
||||
});
|
||||
}
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
|
||||
expect(sendTypingZalouserMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("fails closed when requireMention=true but mention detection is unavailable", async () => {
|
||||
async function expectSkippedGroupMessage(message?: Partial<ZaloInboundMessage>) {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
canResolveExplicitMention: false,
|
||||
hasAnyMention: false,
|
||||
wasExplicitlyMentioned: false,
|
||||
}),
|
||||
account: createAccount(),
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
await processMessageWithDefaults({
|
||||
message: createGroupMessage(message),
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
|
||||
expect(sendTypingZalouserMock).not.toHaveBeenCalled();
|
||||
});
|
||||
}
|
||||
|
||||
it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => {
|
||||
async function expectGroupCommandAuthorizers(params: {
|
||||
accountConfig: ResolvedZalouserAccount["config"];
|
||||
expectedAuthorizers: Array<{ configured: boolean; allowed: boolean }>;
|
||||
}) {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
|
||||
installGroupCommandAuthRuntime();
|
||||
await processGroupControlCommand({
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: params.accountConfig,
|
||||
},
|
||||
});
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
|
||||
expect(authCall?.authorizers).toEqual(params.expectedAuthorizers);
|
||||
}
|
||||
|
||||
async function processOpenDmMessage(params?: {
|
||||
message?: Partial<ZaloInboundMessage>;
|
||||
readSessionUpdatedAt?: (input?: {
|
||||
storePath: string;
|
||||
sessionKey: string;
|
||||
}) => number | undefined;
|
||||
}) {
|
||||
const runtime = installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
if (params?.readSessionUpdatedAt) {
|
||||
runtime.readSessionUpdatedAt.mockImplementation(params.readSessionUpdatedAt);
|
||||
}
|
||||
const account = createAccount();
|
||||
await processMessageWithDefaults({
|
||||
message: createDmMessage(params?.message),
|
||||
account: {
|
||||
...account,
|
||||
config: {
|
||||
...account.config,
|
||||
dmPolicy: "open",
|
||||
},
|
||||
},
|
||||
});
|
||||
return runtime;
|
||||
}
|
||||
|
||||
async function expectDangerousNameMatching(params: {
|
||||
dangerouslyAllowNameMatching?: boolean;
|
||||
expectedDispatches: number;
|
||||
}) {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
await __testing.processMessage({
|
||||
await processMessageWithDefaults({
|
||||
message: createGroupMessage({
|
||||
threadId: "g-attacker-001",
|
||||
groupName: "Trusted Team",
|
||||
senderId: "666",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
content: "ping @bot",
|
||||
}),
|
||||
account: createAccount(),
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: {
|
||||
...createAccount().config,
|
||||
...(params.dangerouslyAllowNameMatching ? { dangerouslyAllowNameMatching: true } : {}),
|
||||
groupPolicy: "allowlist",
|
||||
groupAllowFrom: ["*"],
|
||||
groups: {
|
||||
"group:g-trusted-001": { allow: true },
|
||||
"Trusted Team": { allow: true },
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(
|
||||
params.expectedDispatches,
|
||||
);
|
||||
return dispatchReplyWithBufferedBlockDispatcher;
|
||||
}
|
||||
|
||||
async function dispatchGroupMessage(params: {
|
||||
commandAuthorized: boolean;
|
||||
message: Partial<ZaloInboundMessage>;
|
||||
}) {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: params.commandAuthorized,
|
||||
});
|
||||
await processMessageWithDefaults({
|
||||
message: createGroupMessage(params.message),
|
||||
});
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
|
||||
return dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
|
||||
}
|
||||
|
||||
it("skips unmentioned group messages when requireMention=true", async () => {
|
||||
await expectSkippedGroupMessage();
|
||||
});
|
||||
|
||||
it("fails closed when requireMention=true but mention detection is unavailable", async () => {
|
||||
await expectSkippedGroupMessage({
|
||||
canResolveExplicitMention: false,
|
||||
hasAnyMention: false,
|
||||
wasExplicitlyMentioned: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => {
|
||||
const callArg = await dispatchGroupMessage({
|
||||
commandAuthorized: false,
|
||||
message: {
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
content: "ping @bot",
|
||||
},
|
||||
});
|
||||
expect(callArg?.ctx?.WasMentioned).toBe(true);
|
||||
expect(callArg?.ctx?.To).toBe("zalouser:group:g-1");
|
||||
expect(callArg?.ctx?.OriginatingTo).toBe("zalouser:group:g-1");
|
||||
@ -290,22 +410,14 @@ describe("zalouser monitor group mention gating", () => {
|
||||
});
|
||||
|
||||
it("allows authorized control commands to bypass mention gating", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
const callArg = await dispatchGroupMessage({
|
||||
commandAuthorized: true,
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
message: {
|
||||
content: "/status",
|
||||
hasAnyMention: false,
|
||||
wasExplicitlyMentioned: false,
|
||||
}),
|
||||
account: createAccount(),
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
},
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
|
||||
expect(callArg?.ctx?.WasMentioned).toBe(true);
|
||||
});
|
||||
|
||||
@ -346,57 +458,30 @@ describe("zalouser monitor group mention gating", () => {
|
||||
});
|
||||
|
||||
it("uses commandContent for mention-prefixed control commands", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
const callArg = await dispatchGroupMessage({
|
||||
commandAuthorized: true,
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
message: {
|
||||
content: "@Bot /new",
|
||||
commandContent: "/new",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
}),
|
||||
account: createAccount(),
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
},
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
|
||||
expect(callArg?.ctx?.CommandBody).toBe("/new");
|
||||
expect(callArg?.ctx?.BodyForCommands).toBe("/new");
|
||||
});
|
||||
|
||||
it("allows group control commands when only allowFrom is configured", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
|
||||
installRuntime({
|
||||
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
|
||||
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
content: "/new",
|
||||
commandContent: "/new",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
}),
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: {
|
||||
...createAccount().config,
|
||||
allowFrom: ["123"],
|
||||
},
|
||||
await expectGroupCommandAuthorizers({
|
||||
accountConfig: {
|
||||
...createAccount().config,
|
||||
allowFrom: ["123"],
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
expectedAuthorizers: [
|
||||
{ configured: true, allowed: true },
|
||||
{ configured: true, allowed: true },
|
||||
],
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
|
||||
expect(authCall?.authorizers).toEqual([
|
||||
{ configured: true, allowed: true },
|
||||
{ configured: true, allowed: true },
|
||||
]);
|
||||
});
|
||||
|
||||
it("blocks group messages when sender is not in groupAllowFrom/allowFrom", async () => {
|
||||
@ -425,123 +510,35 @@ describe("zalouser monitor group mention gating", () => {
|
||||
});
|
||||
|
||||
it("does not accept a different group id by matching only the mutable group name by default", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
threadId: "g-attacker-001",
|
||||
groupName: "Trusted Team",
|
||||
senderId: "666",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
content: "ping @bot",
|
||||
}),
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: {
|
||||
...createAccount().config,
|
||||
groupPolicy: "allowlist",
|
||||
groupAllowFrom: ["*"],
|
||||
groups: {
|
||||
"group:g-trusted-001": { allow: true },
|
||||
"Trusted Team": { allow: true },
|
||||
},
|
||||
},
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
|
||||
await expectDangerousNameMatching({ expectedDispatches: 0 });
|
||||
});
|
||||
|
||||
it("accepts mutable group-name matches only when dangerouslyAllowNameMatching is enabled", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
const dispatchReplyWithBufferedBlockDispatcher = await expectDangerousNameMatching({
|
||||
dangerouslyAllowNameMatching: true,
|
||||
expectedDispatches: 1,
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
threadId: "g-attacker-001",
|
||||
groupName: "Trusted Team",
|
||||
senderId: "666",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
content: "ping @bot",
|
||||
}),
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: {
|
||||
...createAccount().config,
|
||||
dangerouslyAllowNameMatching: true,
|
||||
groupPolicy: "allowlist",
|
||||
groupAllowFrom: ["*"],
|
||||
groups: {
|
||||
"group:g-trusted-001": { allow: true },
|
||||
"Trusted Team": { allow: true },
|
||||
},
|
||||
},
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
|
||||
expect(callArg?.ctx?.To).toBe("zalouser:group:g-attacker-001");
|
||||
});
|
||||
|
||||
it("allows group control commands when sender is in groupAllowFrom", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
|
||||
installRuntime({
|
||||
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
|
||||
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
|
||||
});
|
||||
await __testing.processMessage({
|
||||
message: createGroupMessage({
|
||||
content: "/new",
|
||||
commandContent: "/new",
|
||||
hasAnyMention: true,
|
||||
wasExplicitlyMentioned: true,
|
||||
}),
|
||||
account: {
|
||||
...createAccount(),
|
||||
config: {
|
||||
...createAccount().config,
|
||||
allowFrom: ["999"],
|
||||
groupAllowFrom: ["123"],
|
||||
},
|
||||
await expectGroupCommandAuthorizers({
|
||||
accountConfig: {
|
||||
...createAccount().config,
|
||||
allowFrom: ["999"],
|
||||
groupAllowFrom: ["123"],
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
expectedAuthorizers: [
|
||||
{ configured: true, allowed: false },
|
||||
{ configured: true, allowed: true },
|
||||
],
|
||||
});
|
||||
|
||||
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
|
||||
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
|
||||
expect(authCall?.authorizers).toEqual([
|
||||
{ configured: true, allowed: false },
|
||||
{ configured: true, allowed: true },
|
||||
]);
|
||||
});
|
||||
|
||||
it("routes DM messages with direct peer kind", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher, resolveAgentRoute, buildAgentSessionKey } =
|
||||
installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
const account = createAccount();
|
||||
await __testing.processMessage({
|
||||
message: createDmMessage(),
|
||||
account: {
|
||||
...account,
|
||||
config: {
|
||||
...account.config,
|
||||
dmPolicy: "open",
|
||||
},
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
});
|
||||
await processOpenDmMessage();
|
||||
|
||||
expect(resolveAgentRoute).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
@ -559,24 +556,9 @@ describe("zalouser monitor group mention gating", () => {
|
||||
});
|
||||
|
||||
it("reuses the legacy DM session key when only the old group-shaped session exists", async () => {
|
||||
const { dispatchReplyWithBufferedBlockDispatcher, readSessionUpdatedAt } = installRuntime({
|
||||
commandAuthorized: false,
|
||||
});
|
||||
readSessionUpdatedAt.mockImplementation((input?: { storePath: string; sessionKey: string }) =>
|
||||
input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined,
|
||||
);
|
||||
const account = createAccount();
|
||||
await __testing.processMessage({
|
||||
message: createDmMessage(),
|
||||
account: {
|
||||
...account,
|
||||
config: {
|
||||
...account.config,
|
||||
dmPolicy: "open",
|
||||
},
|
||||
},
|
||||
config: createConfig(),
|
||||
runtime: createRuntimeEnv(),
|
||||
const { dispatchReplyWithBufferedBlockDispatcher } = await processOpenDmMessage({
|
||||
readSessionUpdatedAt: (input?: { storePath: string; sessionKey: string }) =>
|
||||
input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined,
|
||||
});
|
||||
|
||||
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
|
||||
|
||||
@ -353,6 +353,7 @@
|
||||
"@mariozechner/pi-ai": "0.57.1",
|
||||
"@mariozechner/pi-coding-agent": "0.57.1",
|
||||
"@mariozechner/pi-tui": "0.57.1",
|
||||
"@modelcontextprotocol/sdk": "1.27.1",
|
||||
"@mozilla/readability": "^0.6.0",
|
||||
"@sinclair/typebox": "0.34.48",
|
||||
"@slack/bolt": "^4.6.0",
|
||||
|
||||
451
pnpm-lock.yaml
generated
451
pnpm-lock.yaml
generated
@ -60,16 +60,19 @@ importers:
|
||||
version: 1.2.0-beta.3
|
||||
'@mariozechner/pi-agent-core':
|
||||
specifier: 0.57.1
|
||||
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-ai':
|
||||
specifier: 0.57.1
|
||||
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-coding-agent':
|
||||
specifier: 0.57.1
|
||||
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-tui':
|
||||
specifier: 0.57.1
|
||||
version: 0.57.1
|
||||
'@modelcontextprotocol/sdk':
|
||||
specifier: 1.27.1
|
||||
version: 1.27.1(zod@4.3.6)
|
||||
'@mozilla/readability':
|
||||
specifier: ^0.6.0
|
||||
version: 0.6.0
|
||||
@ -344,9 +347,10 @@ importers:
|
||||
google-auth-library:
|
||||
specifier: ^10.6.1
|
||||
version: 10.6.1
|
||||
devDependencies:
|
||||
openclaw:
|
||||
specifier: '>=2026.3.11'
|
||||
version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
|
||||
specifier: workspace:*
|
||||
version: link:../..
|
||||
|
||||
extensions/imessage: {}
|
||||
|
||||
@ -377,7 +381,7 @@ importers:
|
||||
dependencies:
|
||||
'@mariozechner/pi-agent-core':
|
||||
specifier: 0.57.1
|
||||
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
|
||||
'@matrix-org/matrix-sdk-crypto-nodejs':
|
||||
specifier: ^0.4.0
|
||||
version: 0.4.0
|
||||
@ -404,10 +408,10 @@ importers:
|
||||
version: 4.3.6
|
||||
|
||||
extensions/memory-core:
|
||||
dependencies:
|
||||
devDependencies:
|
||||
openclaw:
|
||||
specifier: '>=2026.3.11'
|
||||
version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
|
||||
specifier: workspace:*
|
||||
version: link:../..
|
||||
|
||||
extensions/memory-lancedb:
|
||||
dependencies:
|
||||
@ -651,10 +655,6 @@ packages:
|
||||
resolution: {integrity: sha512-t8cl+bPLlHZQD2Sw1a4hSLUybqJZU71+m8znkyeU8CHntFqEp2mMbuLKdHKaAYQ1fAApXMsvzenCAkDzNeeJlw==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/client-bedrock@3.1007.0':
|
||||
resolution: {integrity: sha512-49hH8o6ALKkCiBUgg20HkwxNamP1yYA/n8Si73Z438EqhZGpCfScP3FfxVhrfD5o+4bV4Whi9BTzPKCa/PfUww==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/client-bedrock@3.1008.0':
|
||||
resolution: {integrity: sha512-mzxO/DplpZZT7AIZUCG7Q78OlaeHeDybYz+ZlWZPaXFjGDJwUv1E3SKskmaaQvTsMeieie0WX7gzueYrCx4YfQ==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -711,10 +711,6 @@ packages:
|
||||
resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-ini@3.972.18':
|
||||
resolution: {integrity: sha512-vthIAXJISZnj2576HeyLBj4WTeX+I7PwWeRkbOa0mVX39K13SCGxCgOFuKj2ytm9qTlLOmXe4cdEnroteFtJfw==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-ini@3.972.19':
|
||||
resolution: {integrity: sha512-pVJVjWqVrPqjpFq7o0mCmeZu1Y0c94OCHSYgivdCD2wfmYVtBbwQErakruhgOD8pcMcx9SCqRw1pzHKR7OGBcA==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -727,10 +723,6 @@ packages:
|
||||
resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-login@3.972.18':
|
||||
resolution: {integrity: sha512-kINzc5BBxdYBkPZ0/i1AMPMOk5b5QaFNbYMElVw5QTX13AKj6jcxnv/YNl9oW9mg+Y08ti19hh01HhyEAxsSJQ==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-login@3.972.19':
|
||||
resolution: {integrity: sha512-jOXdZ1o+CywQKr6gyxgxuUmnGwTTnY2Kxs1PM7fI6AYtDWDnmW/yKXayNqkF8KjP1unflqMWKVbVt5VgmE3L0g==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -743,10 +735,6 @@ packages:
|
||||
resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-node@3.972.19':
|
||||
resolution: {integrity: sha512-yDWQ9dFTr+IMxwanFe7+tbN5++q8psZBjlUwOiCXn1EzANoBgtqBwcpYcHaMGtn0Wlfj4NuXdf2JaEx1lz5RaQ==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-node@3.972.20':
|
||||
resolution: {integrity: sha512-0xHca2BnPY0kzjDYPH7vk8YbfdBPpWVS67rtqQMalYDQUCBYS37cZ55K6TuFxCoIyNZgSCFrVKr9PXC5BVvQQw==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -771,10 +759,6 @@ packages:
|
||||
resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-sso@3.972.18':
|
||||
resolution: {integrity: sha512-YHYEfj5S2aqInRt5ub8nDOX8vAxgMvd84wm2Y3WVNfFa/53vOv9T7WOAqXI25qjj3uEcV46xxfqdDQk04h5XQA==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-sso@3.972.19':
|
||||
resolution: {integrity: sha512-kVjQsEU3b///q7EZGrUzol9wzwJFKbEzqJKSq82A9ShrUTEO7FNylTtby3sPV19ndADZh1H3FB3+5ZrvKtEEeg==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -787,10 +771,6 @@ packages:
|
||||
resolution: {integrity: sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-web-identity@3.972.18':
|
||||
resolution: {integrity: sha512-OqlEQpJ+J3T5B96qtC1zLLwkBloechP+fezKbCH0sbd2cCc0Ra55XpxWpk/hRj69xAOYtHvoC4orx6eTa4zU7g==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/credential-provider-web-identity@3.972.19':
|
||||
resolution: {integrity: sha512-BV1BlTFdG4w4tAihxN7iXDBoNcNewXD4q8uZlNQiUrnqxwGWUhKHODIQVSPlQGxXClEj+63m+cqZskw+ESmeZg==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -875,10 +855,6 @@ packages:
|
||||
resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/nested-clients@3.996.8':
|
||||
resolution: {integrity: sha512-6HlLm8ciMW8VzfB80kfIx16PBA9lOa9Dl+dmCBi78JDhvGlx3I7Rorwi5PpVRkL31RprXnYna3yBf6UKkD/PqA==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/nested-clients@3.996.9':
|
||||
resolution: {integrity: sha512-+RpVtpmQbbtzFOKhMlsRcXM/3f1Z49qTOHaA8gEpHOYruERmog6f2AUtf/oTRLCWjR9H2b3roqryV/hI7QMW8w==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -903,14 +879,6 @@ packages:
|
||||
resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/token-providers@3.1005.0':
|
||||
resolution: {integrity: sha512-vMxd+ivKqSxU9bHx5vmAlFKDAkjGotFU56IOkDa5DaTu1WWwbcse0yFHEm9I537oVvodaiwMl3VBwgHfzQ2rvw==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/token-providers@3.1007.0':
|
||||
resolution: {integrity: sha512-kKvVyr53vvVc5k6RbvI6jhafxufxO2SkEw8QeEzJqwOXH/IMY7Cm0IyhnBGdqj80iiIIiIM2jGe7Fn3TIdwdrw==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@aws-sdk/token-providers@3.1008.0':
|
||||
resolution: {integrity: sha512-TulwlHQBWcJs668kNUDMZHN51DeLrDsYT59Ux4a/nbvr025gM6HjKJJ3LvnZccam7OS/ZKUVkWomCneRQKJbBg==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -979,15 +947,6 @@ packages:
|
||||
aws-crt:
|
||||
optional: true
|
||||
|
||||
'@aws-sdk/util-user-agent-node@3.973.5':
|
||||
resolution: {integrity: sha512-Dyy38O4GeMk7UQ48RupfHif//gqnOPbq/zlvRssc11E2mClT+aUfc3VS2yD8oLtzqO3RsqQ9I3gOBB4/+HjPOw==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
peerDependencies:
|
||||
aws-crt: '>=1.0.0'
|
||||
peerDependenciesMeta:
|
||||
aws-crt:
|
||||
optional: true
|
||||
|
||||
'@aws-sdk/util-user-agent-node@3.973.6':
|
||||
resolution: {integrity: sha512-iF7G0prk7AvmOK64FcLvc/fW+Ty1H+vttajL7PvJFReU8urMxfYmynTTuFKDTA76Wgpq3FzTPKwabMQIXQHiXQ==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
@ -1828,6 +1787,16 @@ packages:
|
||||
'@mistralai/mistralai@1.14.1':
|
||||
resolution: {integrity: sha512-IiLmmZFCCTReQgPAT33r7KQ1nYo5JPdvGkrkZqA8qQ2qB1GHgs5LoP5K2ICyrjnpw2n8oSxMM/VP+liiKcGNlQ==}
|
||||
|
||||
'@modelcontextprotocol/sdk@1.27.1':
|
||||
resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==}
|
||||
engines: {node: '>=18'}
|
||||
peerDependencies:
|
||||
'@cfworker/json-schema': ^4.1.1
|
||||
zod: ^3.25 || ^4.0
|
||||
peerDependenciesMeta:
|
||||
'@cfworker/json-schema':
|
||||
optional: true
|
||||
|
||||
'@mozilla/readability@0.6.0':
|
||||
resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==}
|
||||
engines: {node: '>=14.0.0'}
|
||||
@ -4271,6 +4240,10 @@ packages:
|
||||
core-util-is@1.0.3:
|
||||
resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==}
|
||||
|
||||
cors@2.8.6:
|
||||
resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==}
|
||||
engines: {node: '>= 0.10'}
|
||||
|
||||
croner@10.0.1:
|
||||
resolution: {integrity: sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==}
|
||||
engines: {node: '>=18.0'}
|
||||
@ -4550,6 +4523,14 @@ packages:
|
||||
events-universal@1.0.1:
|
||||
resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==}
|
||||
|
||||
eventsource-parser@3.0.6:
|
||||
resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
|
||||
engines: {node: '>=18.0.0'}
|
||||
|
||||
eventsource@3.0.7:
|
||||
resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==}
|
||||
engines: {node: '>=18.0.0'}
|
||||
|
||||
execa@4.1.0:
|
||||
resolution: {integrity: sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==}
|
||||
engines: {node: '>=10'}
|
||||
@ -4561,6 +4542,12 @@ packages:
|
||||
exponential-backoff@3.1.3:
|
||||
resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==}
|
||||
|
||||
express-rate-limit@8.3.1:
|
||||
resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==}
|
||||
engines: {node: '>= 16'}
|
||||
peerDependencies:
|
||||
express: '>= 4.11'
|
||||
|
||||
express@4.22.1:
|
||||
resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==}
|
||||
engines: {node: '>= 0.10.0'}
|
||||
@ -5058,6 +5045,9 @@ packages:
|
||||
jose@4.15.9:
|
||||
resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==}
|
||||
|
||||
jose@6.2.1:
|
||||
resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==}
|
||||
|
||||
js-stringify@1.0.2:
|
||||
resolution: {integrity: sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==}
|
||||
|
||||
@ -5102,6 +5092,9 @@ packages:
|
||||
json-schema-traverse@1.0.0:
|
||||
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
|
||||
|
||||
json-schema-typed@8.0.2:
|
||||
resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==}
|
||||
|
||||
json-schema@0.4.0:
|
||||
resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==}
|
||||
|
||||
@ -5689,14 +5682,6 @@ packages:
|
||||
zod:
|
||||
optional: true
|
||||
|
||||
openclaw@2026.3.11:
|
||||
resolution: {integrity: sha512-bxwiBmHPakwfpY5tqC9lrV5TCu5PKf0c1bHNc3nhrb+pqKcPEWV4zOjDVFLQUHr98ihgWA+3pacy4b3LQ8wduQ==}
|
||||
engines: {node: '>=22.12.0'}
|
||||
hasBin: true
|
||||
peerDependencies:
|
||||
'@napi-rs/canvas': ^0.1.89
|
||||
node-llama-cpp: 3.16.2
|
||||
|
||||
opus-decoder@0.7.11:
|
||||
resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==}
|
||||
|
||||
@ -5870,6 +5855,10 @@ packages:
|
||||
resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==}
|
||||
hasBin: true
|
||||
|
||||
pkce-challenge@5.0.1:
|
||||
resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==}
|
||||
engines: {node: '>=16.20.0'}
|
||||
|
||||
playwright-core@1.58.2:
|
||||
resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==}
|
||||
engines: {node: '>=18'}
|
||||
@ -6667,10 +6656,6 @@ packages:
|
||||
undici-types@7.18.2:
|
||||
resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==}
|
||||
|
||||
undici@7.22.0:
|
||||
resolution: {integrity: sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg==}
|
||||
engines: {node: '>=20.18.1'}
|
||||
|
||||
undici@7.24.0:
|
||||
resolution: {integrity: sha512-jxytwMHhsbdpBXxLAcuu0fzlQeXCNnWdDyRHpvWsUl8vd98UwYdl9YTyn8/HcpcJPC3pwUveefsa3zTxyD/ERg==}
|
||||
engines: {node: '>=20.18.1'}
|
||||
@ -7120,51 +7105,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/client-bedrock@3.1007.0':
|
||||
dependencies:
|
||||
'@aws-crypto/sha256-browser': 5.2.0
|
||||
'@aws-crypto/sha256-js': 5.2.0
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/credential-provider-node': 3.972.19
|
||||
'@aws-sdk/middleware-host-header': 3.972.7
|
||||
'@aws-sdk/middleware-logger': 3.972.7
|
||||
'@aws-sdk/middleware-recursion-detection': 3.972.7
|
||||
'@aws-sdk/middleware-user-agent': 3.972.20
|
||||
'@aws-sdk/region-config-resolver': 3.972.7
|
||||
'@aws-sdk/token-providers': 3.1007.0
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@aws-sdk/util-endpoints': 3.996.4
|
||||
'@aws-sdk/util-user-agent-browser': 3.972.7
|
||||
'@aws-sdk/util-user-agent-node': 3.973.5
|
||||
'@smithy/config-resolver': 4.4.10
|
||||
'@smithy/core': 3.23.9
|
||||
'@smithy/fetch-http-handler': 5.3.13
|
||||
'@smithy/hash-node': 4.2.11
|
||||
'@smithy/invalid-dependency': 4.2.11
|
||||
'@smithy/middleware-content-length': 4.2.11
|
||||
'@smithy/middleware-endpoint': 4.4.23
|
||||
'@smithy/middleware-retry': 4.4.40
|
||||
'@smithy/middleware-serde': 4.2.12
|
||||
'@smithy/middleware-stack': 4.2.11
|
||||
'@smithy/node-config-provider': 4.3.11
|
||||
'@smithy/node-http-handler': 4.4.14
|
||||
'@smithy/protocol-http': 5.3.11
|
||||
'@smithy/smithy-client': 4.12.3
|
||||
'@smithy/types': 4.13.0
|
||||
'@smithy/url-parser': 4.2.11
|
||||
'@smithy/util-base64': 4.3.2
|
||||
'@smithy/util-body-length-browser': 4.2.2
|
||||
'@smithy/util-body-length-node': 4.2.3
|
||||
'@smithy/util-defaults-mode-browser': 4.3.39
|
||||
'@smithy/util-defaults-mode-node': 4.2.42
|
||||
'@smithy/util-endpoints': 3.3.2
|
||||
'@smithy/util-middleware': 4.2.11
|
||||
'@smithy/util-retry': 4.2.11
|
||||
'@smithy/util-utf8': 4.2.2
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/client-bedrock@3.1008.0':
|
||||
dependencies:
|
||||
'@aws-crypto/sha256-browser': 5.2.0
|
||||
@ -7424,25 +7364,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-ini@3.972.18':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/credential-provider-env': 3.972.17
|
||||
'@aws-sdk/credential-provider-http': 3.972.19
|
||||
'@aws-sdk/credential-provider-login': 3.972.18
|
||||
'@aws-sdk/credential-provider-process': 3.972.17
|
||||
'@aws-sdk/credential-provider-sso': 3.972.18
|
||||
'@aws-sdk/credential-provider-web-identity': 3.972.18
|
||||
'@aws-sdk/nested-clients': 3.996.8
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/credential-provider-imds': 4.2.11
|
||||
'@smithy/property-provider': 4.2.11
|
||||
'@smithy/shared-ini-file-loader': 4.4.6
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-ini@3.972.19':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
@ -7488,19 +7409,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-login@3.972.18':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/nested-clients': 3.996.8
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/property-provider': 4.2.11
|
||||
'@smithy/protocol-http': 5.3.11
|
||||
'@smithy/shared-ini-file-loader': 4.4.6
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-login@3.972.19':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
@ -7548,23 +7456,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-node@3.972.19':
|
||||
dependencies:
|
||||
'@aws-sdk/credential-provider-env': 3.972.17
|
||||
'@aws-sdk/credential-provider-http': 3.972.19
|
||||
'@aws-sdk/credential-provider-ini': 3.972.18
|
||||
'@aws-sdk/credential-provider-process': 3.972.17
|
||||
'@aws-sdk/credential-provider-sso': 3.972.18
|
||||
'@aws-sdk/credential-provider-web-identity': 3.972.18
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/credential-provider-imds': 4.2.11
|
||||
'@smithy/property-provider': 4.2.11
|
||||
'@smithy/shared-ini-file-loader': 4.4.6
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-node@3.972.20':
|
||||
dependencies:
|
||||
'@aws-sdk/credential-provider-env': 3.972.17
|
||||
@ -7635,19 +7526,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-sso@3.972.18':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/nested-clients': 3.996.8
|
||||
'@aws-sdk/token-providers': 3.1005.0
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/property-provider': 4.2.11
|
||||
'@smithy/shared-ini-file-loader': 4.4.6
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-sso@3.972.19':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
@ -7685,18 +7563,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-web-identity@3.972.18':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/nested-clients': 3.996.8
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/property-provider': 4.2.11
|
||||
'@smithy/shared-ini-file-loader': 4.4.6
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/credential-provider-web-identity@3.972.19':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
@ -7961,49 +7827,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/nested-clients@3.996.8':
|
||||
dependencies:
|
||||
'@aws-crypto/sha256-browser': 5.2.0
|
||||
'@aws-crypto/sha256-js': 5.2.0
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/middleware-host-header': 3.972.7
|
||||
'@aws-sdk/middleware-logger': 3.972.7
|
||||
'@aws-sdk/middleware-recursion-detection': 3.972.7
|
||||
'@aws-sdk/middleware-user-agent': 3.972.20
|
||||
'@aws-sdk/region-config-resolver': 3.972.7
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@aws-sdk/util-endpoints': 3.996.4
|
||||
'@aws-sdk/util-user-agent-browser': 3.972.7
|
||||
'@aws-sdk/util-user-agent-node': 3.973.5
|
||||
'@smithy/config-resolver': 4.4.10
|
||||
'@smithy/core': 3.23.9
|
||||
'@smithy/fetch-http-handler': 5.3.13
|
||||
'@smithy/hash-node': 4.2.11
|
||||
'@smithy/invalid-dependency': 4.2.11
|
||||
'@smithy/middleware-content-length': 4.2.11
|
||||
'@smithy/middleware-endpoint': 4.4.23
|
||||
'@smithy/middleware-retry': 4.4.40
|
||||
'@smithy/middleware-serde': 4.2.12
|
||||
'@smithy/middleware-stack': 4.2.11
|
||||
'@smithy/node-config-provider': 4.3.11
|
||||
'@smithy/node-http-handler': 4.4.14
|
||||
'@smithy/protocol-http': 5.3.11
|
||||
'@smithy/smithy-client': 4.12.3
|
||||
'@smithy/types': 4.13.0
|
||||
'@smithy/url-parser': 4.2.11
|
||||
'@smithy/util-base64': 4.3.2
|
||||
'@smithy/util-body-length-browser': 4.2.2
|
||||
'@smithy/util-body-length-node': 4.2.3
|
||||
'@smithy/util-defaults-mode-browser': 4.3.39
|
||||
'@smithy/util-defaults-mode-node': 4.2.42
|
||||
'@smithy/util-endpoints': 3.3.2
|
||||
'@smithy/util-middleware': 4.2.11
|
||||
'@smithy/util-retry': 4.2.11
|
||||
'@smithy/util-utf8': 4.2.2
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/nested-clients@3.996.9':
|
||||
dependencies:
|
||||
'@aws-crypto/sha256-browser': 5.2.0
|
||||
@ -8095,30 +7918,6 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/token-providers@3.1005.0':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/nested-clients': 3.996.8
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/property-provider': 4.2.11
|
||||
'@smithy/shared-ini-file-loader': 4.4.6
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/token-providers@3.1007.0':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
'@aws-sdk/nested-clients': 3.996.8
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/property-provider': 4.2.11
|
||||
'@smithy/shared-ini-file-loader': 4.4.6
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
transitivePeerDependencies:
|
||||
- aws-crt
|
||||
|
||||
'@aws-sdk/token-providers@3.1008.0':
|
||||
dependencies:
|
||||
'@aws-sdk/core': 3.973.19
|
||||
@ -8225,14 +8024,6 @@ snapshots:
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
|
||||
'@aws-sdk/util-user-agent-node@3.973.5':
|
||||
dependencies:
|
||||
'@aws-sdk/middleware-user-agent': 3.972.20
|
||||
'@aws-sdk/types': 3.973.5
|
||||
'@smithy/node-config-provider': 4.3.11
|
||||
'@smithy/types': 4.13.0
|
||||
tslib: 2.8.1
|
||||
|
||||
'@aws-sdk/util-user-agent-node@3.973.6':
|
||||
dependencies:
|
||||
'@aws-sdk/middleware-user-agent': 3.972.20
|
||||
@ -8645,12 +8436,14 @@ snapshots:
|
||||
optionalDependencies:
|
||||
'@noble/hashes': 2.0.1
|
||||
|
||||
'@google/genai@1.44.0':
|
||||
'@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))':
|
||||
dependencies:
|
||||
google-auth-library: 10.6.1
|
||||
p-retry: 4.6.2
|
||||
protobufjs: 7.5.4
|
||||
ws: 8.19.0
|
||||
optionalDependencies:
|
||||
'@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6)
|
||||
transitivePeerDependencies:
|
||||
- bufferutil
|
||||
- supports-color
|
||||
@ -8698,7 +8491,6 @@ snapshots:
|
||||
'@hono/node-server@1.19.10(hono@4.12.7)':
|
||||
dependencies:
|
||||
hono: 4.12.7
|
||||
optional: true
|
||||
|
||||
'@huggingface/jinja@0.5.5': {}
|
||||
|
||||
@ -9025,9 +8817,9 @@ snapshots:
|
||||
std-env: 3.10.0
|
||||
yoctocolors: 2.1.2
|
||||
|
||||
'@mariozechner/pi-agent-core@0.57.1(ws@8.19.0)(zod@4.3.6)':
|
||||
'@mariozechner/pi-agent-core@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
|
||||
dependencies:
|
||||
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
|
||||
transitivePeerDependencies:
|
||||
- '@modelcontextprotocol/sdk'
|
||||
- aws-crt
|
||||
@ -9037,11 +8829,11 @@ snapshots:
|
||||
- ws
|
||||
- zod
|
||||
|
||||
'@mariozechner/pi-ai@0.57.1(ws@8.19.0)(zod@4.3.6)':
|
||||
'@mariozechner/pi-ai@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
|
||||
dependencies:
|
||||
'@anthropic-ai/sdk': 0.73.0(zod@4.3.6)
|
||||
'@aws-sdk/client-bedrock-runtime': 3.1004.0
|
||||
'@google/genai': 1.44.0
|
||||
'@google/genai': 1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))
|
||||
'@mistralai/mistralai': 1.14.1
|
||||
'@sinclair/typebox': 0.34.48
|
||||
ajv: 8.18.0
|
||||
@ -9061,11 +8853,11 @@ snapshots:
|
||||
- ws
|
||||
- zod
|
||||
|
||||
'@mariozechner/pi-coding-agent@0.57.1(ws@8.19.0)(zod@4.3.6)':
|
||||
'@mariozechner/pi-coding-agent@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
|
||||
dependencies:
|
||||
'@mariozechner/jiti': 2.6.5
|
||||
'@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-agent-core': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-tui': 0.57.1
|
||||
'@silvia-odwyer/photon-node': 0.3.4
|
||||
chalk: 5.6.2
|
||||
@ -9141,6 +8933,28 @@ snapshots:
|
||||
- bufferutil
|
||||
- utf-8-validate
|
||||
|
||||
'@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)':
|
||||
dependencies:
|
||||
'@hono/node-server': 1.19.10(hono@4.12.7)
|
||||
ajv: 8.18.0
|
||||
ajv-formats: 3.0.1(ajv@8.18.0)
|
||||
content-type: 1.0.5
|
||||
cors: 2.8.6
|
||||
cross-spawn: 7.0.6
|
||||
eventsource: 3.0.7
|
||||
eventsource-parser: 3.0.6
|
||||
express: 5.2.1
|
||||
express-rate-limit: 8.3.1(express@5.2.1)
|
||||
hono: 4.12.7
|
||||
jose: 6.2.1
|
||||
json-schema-typed: 8.0.2
|
||||
pkce-challenge: 5.0.1
|
||||
raw-body: 3.0.2
|
||||
zod: 4.3.6
|
||||
zod-to-json-schema: 3.25.1(zod@4.3.6)
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
'@mozilla/readability@0.6.0': {}
|
||||
|
||||
'@napi-rs/canvas-android-arm64@0.1.95':
|
||||
@ -11916,6 +11730,11 @@ snapshots:
|
||||
|
||||
core-util-is@1.0.3: {}
|
||||
|
||||
cors@2.8.6:
|
||||
dependencies:
|
||||
object-assign: 4.1.1
|
||||
vary: 1.1.2
|
||||
|
||||
croner@10.0.1: {}
|
||||
|
||||
cross-spawn@7.0.6:
|
||||
@ -12167,6 +11986,12 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- bare-abort-controller
|
||||
|
||||
eventsource-parser@3.0.6: {}
|
||||
|
||||
eventsource@3.0.7:
|
||||
dependencies:
|
||||
eventsource-parser: 3.0.6
|
||||
|
||||
execa@4.1.0:
|
||||
dependencies:
|
||||
cross-spawn: 7.0.6
|
||||
@ -12183,6 +12008,11 @@ snapshots:
|
||||
|
||||
exponential-backoff@3.1.3: {}
|
||||
|
||||
express-rate-limit@8.3.1(express@5.2.1):
|
||||
dependencies:
|
||||
express: 5.2.1
|
||||
ip-address: 10.1.0
|
||||
|
||||
express@4.22.1:
|
||||
dependencies:
|
||||
accepts: 1.3.8
|
||||
@ -12826,6 +12656,8 @@ snapshots:
|
||||
|
||||
jose@4.15.9: {}
|
||||
|
||||
jose@6.2.1: {}
|
||||
|
||||
js-stringify@1.0.2: {}
|
||||
|
||||
js-tokens@10.0.0: {}
|
||||
@ -12893,6 +12725,8 @@ snapshots:
|
||||
|
||||
json-schema-traverse@1.0.0: {}
|
||||
|
||||
json-schema-typed@8.0.2: {}
|
||||
|
||||
json-schema@0.4.0: {}
|
||||
|
||||
json-stringify-safe@5.0.1: {}
|
||||
@ -13497,81 +13331,6 @@ snapshots:
|
||||
ws: 8.19.0
|
||||
zod: 4.3.6
|
||||
|
||||
openclaw@2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)):
|
||||
dependencies:
|
||||
'@agentclientprotocol/sdk': 0.16.1(zod@4.3.6)
|
||||
'@aws-sdk/client-bedrock': 3.1007.0
|
||||
'@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1)
|
||||
'@clack/prompts': 1.1.0
|
||||
'@discordjs/voice': 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1)
|
||||
'@grammyjs/runner': 2.0.3(grammy@1.41.1)
|
||||
'@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.1)
|
||||
'@homebridge/ciao': 1.3.5
|
||||
'@larksuiteoapi/node-sdk': 1.59.0
|
||||
'@line/bot-sdk': 10.6.0
|
||||
'@lydell/node-pty': 1.2.0-beta.3
|
||||
'@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6)
|
||||
'@mariozechner/pi-tui': 0.57.1
|
||||
'@mozilla/readability': 0.6.0
|
||||
'@napi-rs/canvas': 0.1.95
|
||||
'@sinclair/typebox': 0.34.48
|
||||
'@slack/bolt': 4.6.0(@types/express@5.0.6)
|
||||
'@slack/web-api': 7.14.1
|
||||
'@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5)
|
||||
ajv: 8.18.0
|
||||
chalk: 5.6.2
|
||||
chokidar: 5.0.0
|
||||
cli-highlight: 2.1.11
|
||||
commander: 14.0.3
|
||||
croner: 10.0.1
|
||||
discord-api-types: 0.38.42
|
||||
dotenv: 17.3.1
|
||||
express: 5.2.1
|
||||
file-type: 21.3.1
|
||||
grammy: 1.41.1
|
||||
hono: 4.12.7
|
||||
https-proxy-agent: 8.0.0
|
||||
ipaddr.js: 2.3.0
|
||||
jiti: 2.6.1
|
||||
json5: 2.2.3
|
||||
jszip: 3.10.1
|
||||
linkedom: 0.18.12
|
||||
long: 5.3.2
|
||||
markdown-it: 14.1.1
|
||||
node-edge-tts: 1.2.10
|
||||
node-llama-cpp: 3.16.2(typescript@5.9.3)
|
||||
opusscript: 0.1.1
|
||||
osc-progress: 0.3.0
|
||||
pdfjs-dist: 5.5.207
|
||||
playwright-core: 1.58.2
|
||||
qrcode-terminal: 0.12.0
|
||||
sharp: 0.34.5
|
||||
sqlite-vec: 0.1.7-alpha.2
|
||||
tar: 7.5.11
|
||||
tslog: 4.10.2
|
||||
undici: 7.22.0
|
||||
ws: 8.19.0
|
||||
yaml: 2.8.2
|
||||
zod: 4.3.6
|
||||
transitivePeerDependencies:
|
||||
- '@discordjs/opus'
|
||||
- '@modelcontextprotocol/sdk'
|
||||
- '@types/express'
|
||||
- audio-decode
|
||||
- aws-crt
|
||||
- bufferutil
|
||||
- canvas
|
||||
- debug
|
||||
- encoding
|
||||
- ffmpeg-static
|
||||
- jimp
|
||||
- link-preview-js
|
||||
- node-opus
|
||||
- supports-color
|
||||
- utf-8-validate
|
||||
|
||||
opus-decoder@0.7.11:
|
||||
dependencies:
|
||||
'@wasm-audio-decoders/common': 9.0.7
|
||||
@ -13784,6 +13543,8 @@ snapshots:
|
||||
sonic-boom: 4.2.1
|
||||
thread-stream: 3.1.0
|
||||
|
||||
pkce-challenge@5.0.1: {}
|
||||
|
||||
playwright-core@1.58.2: {}
|
||||
|
||||
playwright@1.58.2:
|
||||
@ -14725,8 +14486,6 @@ snapshots:
|
||||
|
||||
undici-types@7.18.2: {}
|
||||
|
||||
undici@7.22.0: {}
|
||||
|
||||
undici@7.24.0: {}
|
||||
|
||||
unist-util-is@6.0.1:
|
||||
|
||||
@ -5,6 +5,7 @@ import { appendFileSync } from "node:fs";
|
||||
|
||||
const DOCS_PATH_RE = /^(docs\/|.*\.mdx?$)/;
|
||||
const SKILLS_PYTHON_SCOPE_RE = /^skills\//;
|
||||
const CI_WORKFLOW_SCOPE_RE = /^\.github\/workflows\/ci\.yml$/;
|
||||
const MACOS_PROTOCOL_GEN_RE =
|
||||
/^(apps\/macos\/Sources\/OpenClawProtocol\/|apps\/shared\/OpenClawKit\/Sources\/OpenClawProtocol\/)/;
|
||||
const MACOS_NATIVE_RE = /^(apps\/macos\/|apps\/ios\/|apps\/shared\/|Swabble\/)/;
|
||||
@ -55,6 +56,12 @@ export function detectChangedScope(changedPaths) {
|
||||
runSkillsPython = true;
|
||||
}
|
||||
|
||||
if (CI_WORKFLOW_SCOPE_RE.test(path)) {
|
||||
runMacos = true;
|
||||
runAndroid = true;
|
||||
runSkillsPython = true;
|
||||
}
|
||||
|
||||
if (!MACOS_PROTOCOL_GEN_RE.test(path) && MACOS_NATIVE_RE.test(path)) {
|
||||
runMacos = true;
|
||||
}
|
||||
|
||||
@ -104,11 +104,11 @@ const hostMemoryGiB = Math.floor(os.totalmem() / 1024 ** 3);
|
||||
const highMemLocalHost = !isCI && hostMemoryGiB >= 96;
|
||||
const lowMemLocalHost = !isCI && hostMemoryGiB < 64;
|
||||
const nodeMajor = Number.parseInt(process.versions.node.split(".")[0] ?? "", 10);
|
||||
// vmForks is a big win for transform/import heavy suites, but Node 24+
|
||||
// regressed with Vitest's vm runtime in this repo, and low-memory local hosts
|
||||
// are more likely to hit per-worker V8 heap ceilings. Keep it opt-out via
|
||||
// OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
|
||||
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor < 24 : true;
|
||||
// vmForks is a big win for transform/import heavy suites. Node 24 is stable again
|
||||
// for the default unit-fast lane after moving the known flaky files to fork-only
|
||||
// isolation, but Node 25+ still falls back to process forks until re-validated.
|
||||
// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
|
||||
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor <= 24 : true;
|
||||
const useVmForks =
|
||||
process.env.OPENCLAW_TEST_VM_FORKS === "1" ||
|
||||
(process.env.OPENCLAW_TEST_VM_FORKS !== "0" && !isWindows && supportsVmForks && !lowMemLocalHost);
|
||||
|
||||
@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js";
|
||||
import { AcpGatewayAgent } from "./translator.js";
|
||||
import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js";
|
||||
|
||||
const TEST_SESSION_ID = "session-1";
|
||||
const TEST_SESSION_KEY = "agent:main:main";
|
||||
const TEST_PROMPT = {
|
||||
sessionId: TEST_SESSION_ID,
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest;
|
||||
|
||||
describe("acp prompt cwd prefix", () => {
|
||||
const createStopAfterSendSpy = () =>
|
||||
vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
async function runPromptAndCaptureRequest(
|
||||
options: {
|
||||
cwd?: string;
|
||||
prefixCwd?: boolean;
|
||||
provenanceMode?: "meta" | "meta+receipt";
|
||||
} = {},
|
||||
) {
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: TEST_SESSION_ID,
|
||||
sessionKey: TEST_SESSION_KEY,
|
||||
cwd: options.cwd ?? path.join(os.homedir(), "openclaw-test"),
|
||||
});
|
||||
|
||||
const requestSpy = createStopAfterSendSpy();
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
prefixCwd: options.prefixCwd,
|
||||
provenanceMode: options.provenanceMode,
|
||||
},
|
||||
);
|
||||
|
||||
await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send");
|
||||
return requestSpy;
|
||||
}
|
||||
|
||||
async function runPromptWithCwd(cwd: string) {
|
||||
const pinnedHome = os.homedir();
|
||||
const previousOpenClawHome = process.env.OPENCLAW_HOME;
|
||||
@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => {
|
||||
delete process.env.OPENCLAW_HOME;
|
||||
process.env.HOME = pinnedHome;
|
||||
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:main",
|
||||
cwd,
|
||||
});
|
||||
|
||||
const requestSpy = vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
prefixCwd: true,
|
||||
},
|
||||
);
|
||||
|
||||
try {
|
||||
await expect(
|
||||
agent.prompt({
|
||||
sessionId: "session-1",
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest),
|
||||
).rejects.toThrow("stop-after-send");
|
||||
return requestSpy;
|
||||
return await runPromptAndCaptureRequest({ cwd, prefixCwd: true });
|
||||
} finally {
|
||||
if (previousOpenClawHome === undefined) {
|
||||
delete process.env.OPENCLAW_HOME;
|
||||
@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => {
|
||||
});
|
||||
|
||||
it("injects system provenance metadata when enabled", async () => {
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:main",
|
||||
cwd: path.join(os.homedir(), "openclaw-test"),
|
||||
});
|
||||
|
||||
const requestSpy = vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
provenanceMode: "meta",
|
||||
},
|
||||
);
|
||||
|
||||
await expect(
|
||||
agent.prompt({
|
||||
sessionId: "session-1",
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest),
|
||||
).rejects.toThrow("stop-after-send");
|
||||
|
||||
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" });
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemInputProvenance: {
|
||||
kind: "external_user",
|
||||
originSessionId: "session-1",
|
||||
originSessionId: TEST_SESSION_ID,
|
||||
sourceChannel: "acp",
|
||||
sourceTool: "openclaw_acp",
|
||||
},
|
||||
@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => {
|
||||
});
|
||||
|
||||
it("injects a system provenance receipt when requested", async () => {
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:main",
|
||||
cwd: path.join(os.homedir(), "openclaw-test"),
|
||||
});
|
||||
|
||||
const requestSpy = vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
provenanceMode: "meta+receipt",
|
||||
},
|
||||
);
|
||||
|
||||
await expect(
|
||||
agent.prompt({
|
||||
sessionId: "session-1",
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest),
|
||||
).rejects.toThrow("stop-after-send");
|
||||
|
||||
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" });
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemInputProvenance: {
|
||||
kind: "external_user",
|
||||
originSessionId: "session-1",
|
||||
originSessionId: TEST_SESSION_ID,
|
||||
sourceChannel: "acp",
|
||||
sourceTool: "openclaw_acp",
|
||||
},
|
||||
@ -182,14 +140,14 @@ describe("acp prompt cwd prefix", () => {
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"),
|
||||
systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`),
|
||||
}),
|
||||
{ expectFinal: true },
|
||||
);
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"),
|
||||
systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`),
|
||||
}),
|
||||
{ expectFinal: true },
|
||||
);
|
||||
|
||||
@ -1,10 +1,5 @@
|
||||
import type { AgentToolResult } from "@mariozechner/pi-agent-core";
|
||||
import { loadConfig } from "../config/config.js";
|
||||
import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js";
|
||||
import {
|
||||
hasConfiguredExecApprovalDmRoute,
|
||||
resolveExecApprovalInitiatingSurfaceState,
|
||||
} from "../infra/exec-approval-surface.js";
|
||||
import {
|
||||
addAllowlistEntry,
|
||||
type ExecAsk,
|
||||
@ -26,7 +21,7 @@ import {
|
||||
registerExecApprovalRequestForHostOrThrow,
|
||||
} from "./bash-tools.exec-approval-request.js";
|
||||
import {
|
||||
createDefaultExecApprovalRequestContext,
|
||||
createAndRegisterDefaultExecApprovalRequest,
|
||||
resolveBaseExecApprovalDecision,
|
||||
resolveApprovalDecisionOrUndefined,
|
||||
resolveExecHostApprovalContext,
|
||||
@ -149,52 +144,36 @@ export async function processGatewayAllowlist(
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: defaultExpiresAtMs,
|
||||
preResolvedDecision: defaultPreResolvedDecision,
|
||||
} = createDefaultExecApprovalRequestContext({
|
||||
expiresAtMs,
|
||||
preResolvedDecision,
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
} = await createAndRegisterDefaultExecApprovalRequest({
|
||||
warnings: params.warnings,
|
||||
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
|
||||
createApprovalSlug,
|
||||
turnSourceChannel: params.turnSourceChannel,
|
||||
turnSourceAccountId: params.turnSourceAccountId,
|
||||
register: async (approvalId) =>
|
||||
await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
command: params.command,
|
||||
workdir: params.workdir,
|
||||
host: "gateway",
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: params.agentId,
|
||||
sessionKey: params.sessionKey,
|
||||
}),
|
||||
resolvedPath: allowlistEval.segments[0]?.resolution?.resolvedPath,
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
}),
|
||||
});
|
||||
const resolvedPath = allowlistEval.segments[0]?.resolution?.resolvedPath;
|
||||
const effectiveTimeout =
|
||||
typeof params.timeoutSec === "number" ? params.timeoutSec : params.defaultTimeoutSec;
|
||||
let expiresAtMs = defaultExpiresAtMs;
|
||||
let preResolvedDecision = defaultPreResolvedDecision;
|
||||
|
||||
// Register first so the returned approval ID is actionable immediately.
|
||||
const registration = await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
command: params.command,
|
||||
workdir: params.workdir,
|
||||
host: "gateway",
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: params.agentId,
|
||||
sessionKey: params.sessionKey,
|
||||
}),
|
||||
resolvedPath,
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
});
|
||||
expiresAtMs = registration.expiresAtMs;
|
||||
preResolvedDecision = registration.finalDecision;
|
||||
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
|
||||
channel: params.turnSourceChannel,
|
||||
accountId: params.turnSourceAccountId,
|
||||
});
|
||||
const cfg = loadConfig();
|
||||
const sentApproverDms =
|
||||
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
|
||||
hasConfiguredExecApprovalDmRoute(cfg);
|
||||
const unavailableReason =
|
||||
preResolvedDecision === null
|
||||
? "no-approval-route"
|
||||
: initiatingSurface.kind === "disabled"
|
||||
? "initiating-platform-disabled"
|
||||
: initiatingSurface.kind === "unsupported"
|
||||
? "initiating-platform-unsupported"
|
||||
: null;
|
||||
|
||||
void (async () => {
|
||||
const decision = await resolveApprovalDecisionOrUndefined({
|
||||
|
||||
@ -1,11 +1,6 @@
|
||||
import crypto from "node:crypto";
|
||||
import type { AgentToolResult } from "@mariozechner/pi-agent-core";
|
||||
import { loadConfig } from "../config/config.js";
|
||||
import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js";
|
||||
import {
|
||||
hasConfiguredExecApprovalDmRoute,
|
||||
resolveExecApprovalInitiatingSurfaceState,
|
||||
} from "../infra/exec-approval-surface.js";
|
||||
import {
|
||||
type ExecApprovalsFile,
|
||||
type ExecAsk,
|
||||
@ -25,7 +20,7 @@ import {
|
||||
registerExecApprovalRequestForHostOrThrow,
|
||||
} from "./bash-tools.exec-approval-request.js";
|
||||
import {
|
||||
createDefaultExecApprovalRequestContext,
|
||||
createAndRegisterDefaultExecApprovalRequest,
|
||||
resolveBaseExecApprovalDecision,
|
||||
resolveApprovalDecisionOrUndefined,
|
||||
resolveExecHostApprovalContext,
|
||||
@ -225,50 +220,34 @@ export async function executeNodeHostCommand(
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: defaultExpiresAtMs,
|
||||
preResolvedDecision: defaultPreResolvedDecision,
|
||||
} = createDefaultExecApprovalRequestContext({
|
||||
expiresAtMs,
|
||||
preResolvedDecision,
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
} = await createAndRegisterDefaultExecApprovalRequest({
|
||||
warnings: params.warnings,
|
||||
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
|
||||
createApprovalSlug,
|
||||
turnSourceChannel: params.turnSourceChannel,
|
||||
turnSourceAccountId: params.turnSourceAccountId,
|
||||
register: async (approvalId) =>
|
||||
await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
systemRunPlan: prepared.plan,
|
||||
env: nodeEnv,
|
||||
workdir: runCwd,
|
||||
host: "node",
|
||||
nodeId,
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: runAgentId,
|
||||
sessionKey: runSessionKey,
|
||||
}),
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
}),
|
||||
});
|
||||
let expiresAtMs = defaultExpiresAtMs;
|
||||
let preResolvedDecision = defaultPreResolvedDecision;
|
||||
|
||||
// Register first so the returned approval ID is actionable immediately.
|
||||
const registration = await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
systemRunPlan: prepared.plan,
|
||||
env: nodeEnv,
|
||||
workdir: runCwd,
|
||||
host: "node",
|
||||
nodeId,
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: runAgentId,
|
||||
sessionKey: runSessionKey,
|
||||
}),
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
});
|
||||
expiresAtMs = registration.expiresAtMs;
|
||||
preResolvedDecision = registration.finalDecision;
|
||||
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
|
||||
channel: params.turnSourceChannel,
|
||||
accountId: params.turnSourceAccountId,
|
||||
});
|
||||
const cfg = loadConfig();
|
||||
const sentApproverDms =
|
||||
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
|
||||
hasConfiguredExecApprovalDmRoute(cfg);
|
||||
const unavailableReason =
|
||||
preResolvedDecision === null
|
||||
? "no-approval-route"
|
||||
: initiatingSurface.kind === "disabled"
|
||||
? "initiating-platform-disabled"
|
||||
: initiatingSurface.kind === "unsupported"
|
||||
? "initiating-platform-unsupported"
|
||||
: null;
|
||||
|
||||
void (async () => {
|
||||
const decision = await resolveApprovalDecisionOrUndefined({
|
||||
|
||||
@ -1,4 +1,10 @@
|
||||
import crypto from "node:crypto";
|
||||
import { loadConfig } from "../config/config.js";
|
||||
import {
|
||||
hasConfiguredExecApprovalDmRoute,
|
||||
type ExecApprovalInitiatingSurfaceState,
|
||||
resolveExecApprovalInitiatingSurfaceState,
|
||||
} from "../infra/exec-approval-surface.js";
|
||||
import {
|
||||
maxAsk,
|
||||
minSecurity,
|
||||
@ -6,7 +12,10 @@ import {
|
||||
type ExecAsk,
|
||||
type ExecSecurity,
|
||||
} from "../infra/exec-approvals.js";
|
||||
import { resolveRegisteredExecApprovalDecision } from "./bash-tools.exec-approval-request.js";
|
||||
import {
|
||||
type ExecApprovalRegistration,
|
||||
resolveRegisteredExecApprovalDecision,
|
||||
} from "./bash-tools.exec-approval-request.js";
|
||||
import { DEFAULT_APPROVAL_TIMEOUT_MS } from "./bash-tools.exec-runtime.js";
|
||||
|
||||
type ResolvedExecApprovals = ReturnType<typeof resolveExecApprovals>;
|
||||
@ -28,6 +37,22 @@ export type ExecApprovalRequestState = ExecApprovalPendingState & {
|
||||
noticeSeconds: number;
|
||||
};
|
||||
|
||||
export type ExecApprovalUnavailableReason =
|
||||
| "no-approval-route"
|
||||
| "initiating-platform-disabled"
|
||||
| "initiating-platform-unsupported";
|
||||
|
||||
export type RegisteredExecApprovalRequestContext = {
|
||||
approvalId: string;
|
||||
approvalSlug: string;
|
||||
warningText: string;
|
||||
expiresAtMs: number;
|
||||
preResolvedDecision: string | null | undefined;
|
||||
initiatingSurface: ExecApprovalInitiatingSurfaceState;
|
||||
sentApproverDms: boolean;
|
||||
unavailableReason: ExecApprovalUnavailableReason | null;
|
||||
};
|
||||
|
||||
export function createExecApprovalPendingState(params: {
|
||||
warnings: string[];
|
||||
timeoutMs: number;
|
||||
@ -158,3 +183,77 @@ export async function resolveApprovalDecisionOrUndefined(params: {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveExecApprovalUnavailableState(params: {
|
||||
turnSourceChannel?: string;
|
||||
turnSourceAccountId?: string;
|
||||
preResolvedDecision: string | null | undefined;
|
||||
}): {
|
||||
initiatingSurface: ExecApprovalInitiatingSurfaceState;
|
||||
sentApproverDms: boolean;
|
||||
unavailableReason: ExecApprovalUnavailableReason | null;
|
||||
} {
|
||||
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
|
||||
channel: params.turnSourceChannel,
|
||||
accountId: params.turnSourceAccountId,
|
||||
});
|
||||
const sentApproverDms =
|
||||
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
|
||||
hasConfiguredExecApprovalDmRoute(loadConfig());
|
||||
const unavailableReason =
|
||||
params.preResolvedDecision === null
|
||||
? "no-approval-route"
|
||||
: initiatingSurface.kind === "disabled"
|
||||
? "initiating-platform-disabled"
|
||||
: initiatingSurface.kind === "unsupported"
|
||||
? "initiating-platform-unsupported"
|
||||
: null;
|
||||
return {
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
};
|
||||
}
|
||||
|
||||
export async function createAndRegisterDefaultExecApprovalRequest(params: {
|
||||
warnings: string[];
|
||||
approvalRunningNoticeMs: number;
|
||||
createApprovalSlug: (approvalId: string) => string;
|
||||
turnSourceChannel?: string;
|
||||
turnSourceAccountId?: string;
|
||||
register: (approvalId: string) => Promise<ExecApprovalRegistration>;
|
||||
}): Promise<RegisteredExecApprovalRequestContext> {
|
||||
const {
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: defaultExpiresAtMs,
|
||||
preResolvedDecision: defaultPreResolvedDecision,
|
||||
} = createDefaultExecApprovalRequestContext({
|
||||
warnings: params.warnings,
|
||||
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
|
||||
createApprovalSlug: params.createApprovalSlug,
|
||||
});
|
||||
const registration = await params.register(approvalId);
|
||||
const preResolvedDecision = registration.finalDecision;
|
||||
const { initiatingSurface, sentApproverDms, unavailableReason } =
|
||||
resolveExecApprovalUnavailableState({
|
||||
turnSourceChannel: params.turnSourceChannel,
|
||||
turnSourceAccountId: params.turnSourceAccountId,
|
||||
preResolvedDecision,
|
||||
});
|
||||
|
||||
return {
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: registration.expiresAtMs ?? defaultExpiresAtMs,
|
||||
preResolvedDecision:
|
||||
registration.finalDecision === undefined
|
||||
? defaultPreResolvedDecision
|
||||
: registration.finalDecision,
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
};
|
||||
}
|
||||
|
||||
@ -43,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) {
|
||||
return buildSystemRunPreparePayload(params);
|
||||
}
|
||||
|
||||
function getTestConfigPath() {
|
||||
return path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
|
||||
}
|
||||
|
||||
async function writeOpenClawConfig(config: Record<string, unknown>, pretty = false) {
|
||||
const configPath = getTestConfigPath();
|
||||
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fs.writeFile(configPath, JSON.stringify(config, null, pretty ? 2 : undefined));
|
||||
}
|
||||
|
||||
async function writeExecApprovalsConfig(config: Record<string, unknown>) {
|
||||
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
|
||||
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
|
||||
await fs.writeFile(approvalsPath, JSON.stringify(config, null, 2));
|
||||
}
|
||||
|
||||
function acceptedApprovalResponse(params: unknown) {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
}
|
||||
|
||||
function getResultText(result: { content: Array<{ type?: string; text?: string }> }) {
|
||||
return result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
}
|
||||
|
||||
function expectPendingApprovalText(
|
||||
result: {
|
||||
details: { status?: string };
|
||||
content: Array<{ type?: string; text?: string }>;
|
||||
},
|
||||
options: {
|
||||
command: string;
|
||||
host: "gateway" | "node";
|
||||
nodeId?: string;
|
||||
interactive?: boolean;
|
||||
},
|
||||
) {
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const details = result.details as { approvalId: string; approvalSlug: string };
|
||||
const pendingText = getResultText(result);
|
||||
expect(pendingText).toContain(
|
||||
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
|
||||
);
|
||||
expect(pendingText).toContain(`full ${details.approvalId}`);
|
||||
expect(pendingText).toContain(`Host: ${options.host}`);
|
||||
if (options.nodeId) {
|
||||
expect(pendingText).toContain(`Node: ${options.nodeId}`);
|
||||
}
|
||||
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
|
||||
expect(pendingText).toContain("Command:\n```sh\n");
|
||||
expect(pendingText).toContain(options.command);
|
||||
if (options.interactive) {
|
||||
expect(pendingText).toContain("Mode: foreground (interactive approvals available).");
|
||||
expect(pendingText).toContain("Background mode requires pre-approved policy");
|
||||
}
|
||||
return details;
|
||||
}
|
||||
|
||||
function expectPendingCommandText(
|
||||
result: {
|
||||
details: { status?: string };
|
||||
content: Array<{ type?: string; text?: string }>;
|
||||
},
|
||||
command: string,
|
||||
) {
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const text = getResultText(result);
|
||||
expect(text).toContain("Command:\n```sh\n");
|
||||
expect(text).toContain(command);
|
||||
}
|
||||
|
||||
function mockGatewayOkCalls(calls: string[]) {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
calls.push(method);
|
||||
return { ok: true };
|
||||
});
|
||||
}
|
||||
|
||||
function createElevatedAllowlistExecTool() {
|
||||
return createExecTool({
|
||||
ask: "on-miss",
|
||||
security: "allowlist",
|
||||
approvalRunningNoticeMs: 0,
|
||||
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
|
||||
});
|
||||
}
|
||||
|
||||
async function expectGatewayExecWithoutApproval(options: {
|
||||
config: Record<string, unknown>;
|
||||
command: string;
|
||||
ask?: "always" | "on-miss" | "off";
|
||||
}) {
|
||||
await writeExecApprovalsConfig(options.config);
|
||||
const calls: string[] = [];
|
||||
mockGatewayOkCalls(calls);
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
ask: options.ask,
|
||||
security: "full",
|
||||
approvalRunningNoticeMs: 0,
|
||||
});
|
||||
|
||||
const result = await tool.execute("call-no-approval", { command: options.command });
|
||||
expect(result.details.status).toBe("completed");
|
||||
expect(calls).not.toContain("exec.approval.request");
|
||||
expect(calls).not.toContain("exec.approval.waitDecision");
|
||||
}
|
||||
|
||||
function mockAcceptedApprovalFlow(options: {
|
||||
onAgent?: (params: Record<string, unknown>) => void;
|
||||
onNodeInvoke?: (params: unknown) => unknown;
|
||||
}) {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return acceptedApprovalResponse(params);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "allow-once" };
|
||||
}
|
||||
if (method === "agent" && options.onAgent) {
|
||||
options.onAgent(params as Record<string, unknown>);
|
||||
return { status: "ok" };
|
||||
}
|
||||
if (method === "node.invoke" && options.onNodeInvoke) {
|
||||
return await options.onNodeInvoke(params);
|
||||
}
|
||||
return { ok: true };
|
||||
});
|
||||
}
|
||||
|
||||
function mockPendingApprovalRegistration() {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: "approval-id" };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: null };
|
||||
}
|
||||
return { ok: true };
|
||||
});
|
||||
}
|
||||
|
||||
function expectApprovalUnavailableText(result: {
|
||||
details: { status?: string };
|
||||
content: Array<{ type?: string; text?: string }>;
|
||||
}) {
|
||||
expect(result.details.status).toBe("approval-unavailable");
|
||||
const text = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(text).not.toContain("/approve");
|
||||
expect(text).not.toContain("npm view diver name version description");
|
||||
expect(text).not.toContain("Pending command:");
|
||||
expect(text).not.toContain("Host:");
|
||||
expect(text).not.toContain("CWD:");
|
||||
return text;
|
||||
}
|
||||
|
||||
describe("exec approvals", () => {
|
||||
let previousHome: string | undefined;
|
||||
let previousUserProfile: string | undefined;
|
||||
@ -81,18 +237,11 @@ describe("exec approvals", () => {
|
||||
let invokeParams: unknown;
|
||||
let agentParams: unknown;
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "allow-once" };
|
||||
}
|
||||
if (method === "agent") {
|
||||
mockAcceptedApprovalFlow({
|
||||
onAgent: (params) => {
|
||||
agentParams = params;
|
||||
return { status: "ok" };
|
||||
}
|
||||
if (method === "node.invoke") {
|
||||
},
|
||||
onNodeInvoke: (params) => {
|
||||
const invoke = params as { command?: string };
|
||||
if (invoke.command === "system.run.prepare") {
|
||||
return buildPreparedSystemRunPayload(params);
|
||||
@ -101,8 +250,7 @@ describe("exec approvals", () => {
|
||||
invokeParams = params;
|
||||
return { payload: { success: true, stdout: "ok" } };
|
||||
}
|
||||
}
|
||||
return { ok: true };
|
||||
},
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
@ -113,19 +261,12 @@ describe("exec approvals", () => {
|
||||
});
|
||||
|
||||
const result = await tool.execute("call1", { command: "ls -la" });
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const details = result.details as { approvalId: string; approvalSlug: string };
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
|
||||
);
|
||||
expect(pendingText).toContain(`full ${details.approvalId}`);
|
||||
expect(pendingText).toContain("Host: node");
|
||||
expect(pendingText).toContain("Node: node-1");
|
||||
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
|
||||
expect(pendingText).toContain("Command:\n```sh\nls -la\n```");
|
||||
expect(pendingText).toContain("Mode: foreground (interactive approvals available).");
|
||||
expect(pendingText).toContain("Background mode requires pre-approved policy");
|
||||
const details = expectPendingApprovalText(result, {
|
||||
command: "ls -la",
|
||||
host: "node",
|
||||
nodeId: "node-1",
|
||||
interactive: true,
|
||||
});
|
||||
const approvalId = details.approvalId;
|
||||
|
||||
await expect
|
||||
@ -214,74 +355,28 @@ describe("exec approvals", () => {
|
||||
});
|
||||
|
||||
it("uses exec-approvals ask=off to suppress gateway prompts", async () => {
|
||||
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
|
||||
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
approvalsPath,
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {
|
||||
main: { security: "full", ask: "off", askFallback: "full" },
|
||||
},
|
||||
await expectGatewayExecWithoutApproval({
|
||||
config: {
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {
|
||||
main: { security: "full", ask: "off", askFallback: "full" },
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
const calls: string[] = [];
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
calls.push(method);
|
||||
return { ok: true };
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
},
|
||||
command: "echo ok",
|
||||
ask: "on-miss",
|
||||
security: "full",
|
||||
approvalRunningNoticeMs: 0,
|
||||
});
|
||||
|
||||
const result = await tool.execute("call3b", { command: "echo ok" });
|
||||
expect(result.details.status).toBe("completed");
|
||||
expect(calls).not.toContain("exec.approval.request");
|
||||
expect(calls).not.toContain("exec.approval.waitDecision");
|
||||
});
|
||||
|
||||
it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => {
|
||||
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
|
||||
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
approvalsPath,
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
const calls: string[] = [];
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
calls.push(method);
|
||||
return { ok: true };
|
||||
await expectGatewayExecWithoutApproval({
|
||||
config: {
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {},
|
||||
},
|
||||
command: "echo ok",
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
security: "full",
|
||||
approvalRunningNoticeMs: 0,
|
||||
});
|
||||
|
||||
const result = await tool.execute("call3c", { command: "echo ok" });
|
||||
expect(result.details.status).toBe("completed");
|
||||
expect(calls).not.toContain("exec.approval.request");
|
||||
expect(calls).not.toContain("exec.approval.waitDecision");
|
||||
});
|
||||
|
||||
it("requires approval for elevated ask when allowlist misses", async () => {
|
||||
@ -296,7 +391,7 @@ describe("exec approvals", () => {
|
||||
if (method === "exec.approval.request") {
|
||||
resolveApproval?.();
|
||||
// Return registration confirmation
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
return acceptedApprovalResponse(params);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "deny" };
|
||||
@ -304,24 +399,10 @@ describe("exec approvals", () => {
|
||||
return { ok: true };
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
ask: "on-miss",
|
||||
security: "allowlist",
|
||||
approvalRunningNoticeMs: 0,
|
||||
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
|
||||
});
|
||||
const tool = createElevatedAllowlistExecTool();
|
||||
|
||||
const result = await tool.execute("call4", { command: "echo ok", elevated: true });
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const details = result.details as { approvalId: string; approvalSlug: string };
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
|
||||
);
|
||||
expect(pendingText).toContain(`full ${details.approvalId}`);
|
||||
expect(pendingText).toContain("Host: gateway");
|
||||
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
|
||||
expect(pendingText).toContain("Command:\n```sh\necho ok\n```");
|
||||
expectPendingApprovalText(result, { command: "echo ok", host: "gateway" });
|
||||
await approvalSeen;
|
||||
expect(calls).toContain("exec.approval.request");
|
||||
expect(calls).toContain("exec.approval.waitDecision");
|
||||
@ -330,18 +411,10 @@ describe("exec approvals", () => {
|
||||
it("starts a direct agent follow-up after approved gateway exec completes", async () => {
|
||||
const agentCalls: Array<Record<string, unknown>> = [];
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "allow-once" };
|
||||
}
|
||||
if (method === "agent") {
|
||||
agentCalls.push(params as Record<string, unknown>);
|
||||
return { status: "ok" };
|
||||
}
|
||||
return { ok: true };
|
||||
mockAcceptedApprovalFlow({
|
||||
onAgent: (params) => {
|
||||
agentCalls.push(params);
|
||||
},
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
@ -388,7 +461,7 @@ describe("exec approvals", () => {
|
||||
if (typeof request.id === "string") {
|
||||
requestIds.push(request.id);
|
||||
}
|
||||
return { status: "accepted", id: request.id };
|
||||
return acceptedApprovalResponse(request);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
const wait = params as { id?: string };
|
||||
@ -400,12 +473,7 @@ describe("exec approvals", () => {
|
||||
return { ok: true };
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
ask: "on-miss",
|
||||
security: "allowlist",
|
||||
approvalRunningNoticeMs: 0,
|
||||
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
|
||||
});
|
||||
const tool = createElevatedAllowlistExecTool();
|
||||
|
||||
const first = await tool.execute("call-seq-1", {
|
||||
command: "npm view diver --json",
|
||||
@ -429,7 +497,7 @@ describe("exec approvals", () => {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
calls.push(method);
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
return acceptedApprovalResponse(params);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "deny" };
|
||||
@ -448,11 +516,7 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver --json | jq .name && brew outdated",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
|
||||
);
|
||||
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
|
||||
expect(calls).toContain("exec.approval.request");
|
||||
});
|
||||
|
||||
@ -480,11 +544,7 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver --json | jq .name && brew outdated",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
|
||||
);
|
||||
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
|
||||
expect(calls).toContain("exec.approval.request");
|
||||
});
|
||||
|
||||
@ -551,30 +611,17 @@ describe("exec approvals", () => {
|
||||
});
|
||||
|
||||
it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => {
|
||||
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
|
||||
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
configPath,
|
||||
JSON.stringify({
|
||||
channels: {
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
await writeOpenClawConfig({
|
||||
channels: {
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: "approval-id" };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: null };
|
||||
}
|
||||
return { ok: true };
|
||||
},
|
||||
});
|
||||
|
||||
mockPendingApprovalRegistration();
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
ask: "always",
|
||||
@ -588,49 +635,29 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver name version description",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-unavailable");
|
||||
const text = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
const text = expectApprovalUnavailableText(result);
|
||||
expect(text).toContain("chat exec approvals are not enabled on Discord");
|
||||
expect(text).toContain("Web UI or terminal UI");
|
||||
expect(text).not.toContain("/approve");
|
||||
expect(text).not.toContain("npm view diver name version description");
|
||||
expect(text).not.toContain("Pending command:");
|
||||
expect(text).not.toContain("Host:");
|
||||
expect(text).not.toContain("CWD:");
|
||||
});
|
||||
|
||||
it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => {
|
||||
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
|
||||
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
configPath,
|
||||
JSON.stringify(
|
||||
{
|
||||
channels: {
|
||||
telegram: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
|
||||
},
|
||||
await writeOpenClawConfig(
|
||||
{
|
||||
channels: {
|
||||
telegram: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: "approval-id" };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: null };
|
||||
}
|
||||
return { ok: true };
|
||||
});
|
||||
mockPendingApprovalRegistration();
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
@ -645,14 +672,8 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver name version description",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-unavailable");
|
||||
const text = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
const text = expectApprovalUnavailableText(result);
|
||||
expect(text).toContain("Approval required. I sent the allowed approvers DMs.");
|
||||
expect(text).not.toContain("/approve");
|
||||
expect(text).not.toContain("npm view diver name version description");
|
||||
expect(text).not.toContain("Pending command:");
|
||||
expect(text).not.toContain("Host:");
|
||||
expect(text).not.toContain("CWD:");
|
||||
});
|
||||
|
||||
it("denies node obfuscated command when approval request times out", async () => {
|
||||
|
||||
@ -46,6 +46,20 @@ function expectFallbackUsed(
|
||||
expect(result.attempts[0]?.reason).toBe("rate_limit");
|
||||
}
|
||||
|
||||
function expectPrimarySkippedForReason(
|
||||
result: { result: unknown; attempts: Array<{ reason?: string }> },
|
||||
run: {
|
||||
(...args: unknown[]): unknown;
|
||||
mock: { calls: unknown[][] };
|
||||
},
|
||||
reason: string,
|
||||
) {
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run).toHaveBeenCalledTimes(1);
|
||||
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
|
||||
expect(result.attempts[0]?.reason).toBe(reason);
|
||||
}
|
||||
|
||||
function expectPrimaryProbeSuccess(
|
||||
result: { result: unknown },
|
||||
run: {
|
||||
@ -183,11 +197,7 @@ describe("runWithModelFallback – probe logic", () => {
|
||||
const run = vi.fn().mockResolvedValue("ok");
|
||||
|
||||
const result = await runPrimaryCandidate(cfg, run);
|
||||
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run).toHaveBeenCalledTimes(1);
|
||||
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
|
||||
expect(result.attempts[0]?.reason).toBe("billing");
|
||||
expectPrimarySkippedForReason(result, run, "billing");
|
||||
});
|
||||
|
||||
it("probes primary model when within 2-min margin of cooldown expiry", async () => {
|
||||
@ -540,10 +550,6 @@ describe("runWithModelFallback – probe logic", () => {
|
||||
const run = vi.fn().mockResolvedValue("ok");
|
||||
|
||||
const result = await runPrimaryCandidate(cfg, run);
|
||||
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run).toHaveBeenCalledTimes(1);
|
||||
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
|
||||
expect(result.attempts[0]?.reason).toBe("billing");
|
||||
expectPrimarySkippedForReason(result, run, "billing");
|
||||
});
|
||||
});
|
||||
|
||||
@ -80,131 +80,121 @@ describe("model-selection", () => {
|
||||
});
|
||||
|
||||
describe("parseModelRef", () => {
|
||||
it("should parse full model refs", () => {
|
||||
expect(parseModelRef("anthropic/claude-3-5-sonnet", "openai")).toEqual({
|
||||
provider: "anthropic",
|
||||
model: "claude-3-5-sonnet",
|
||||
});
|
||||
const expectParsedModelVariants = (
|
||||
variants: string[],
|
||||
defaultProvider: string,
|
||||
expected: { provider: string; model: string },
|
||||
) => {
|
||||
for (const raw of variants) {
|
||||
expect(parseModelRef(raw, defaultProvider), raw).toEqual(expected);
|
||||
}
|
||||
};
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "parses explicit provider/model refs",
|
||||
variants: ["anthropic/claude-3-5-sonnet"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "anthropic", model: "claude-3-5-sonnet" },
|
||||
},
|
||||
{
|
||||
name: "uses the default provider when omitted",
|
||||
variants: ["claude-3-5-sonnet"],
|
||||
defaultProvider: "anthropic",
|
||||
expected: { provider: "anthropic", model: "claude-3-5-sonnet" },
|
||||
},
|
||||
{
|
||||
name: "preserves nested model ids after the provider prefix",
|
||||
variants: ["nvidia/moonshotai/kimi-k2.5"],
|
||||
defaultProvider: "anthropic",
|
||||
expected: { provider: "nvidia", model: "moonshotai/kimi-k2.5" },
|
||||
},
|
||||
{
|
||||
name: "normalizes anthropic shorthand aliases",
|
||||
variants: ["anthropic/opus-4.6", "opus-4.6", " anthropic / opus-4.6 "],
|
||||
defaultProvider: "anthropic",
|
||||
expected: { provider: "anthropic", model: "claude-opus-4-6" },
|
||||
},
|
||||
{
|
||||
name: "normalizes anthropic sonnet aliases",
|
||||
variants: ["anthropic/sonnet-4.6", "sonnet-4.6"],
|
||||
defaultProvider: "anthropic",
|
||||
expected: { provider: "anthropic", model: "claude-sonnet-4-6" },
|
||||
},
|
||||
{
|
||||
name: "normalizes deprecated google flash preview ids",
|
||||
variants: ["google/gemini-3.1-flash-preview", "gemini-3.1-flash-preview"],
|
||||
defaultProvider: "google",
|
||||
expected: { provider: "google", model: "gemini-3-flash-preview" },
|
||||
},
|
||||
{
|
||||
name: "normalizes gemini 3.1 flash-lite ids",
|
||||
variants: ["google/gemini-3.1-flash-lite", "gemini-3.1-flash-lite"],
|
||||
defaultProvider: "google",
|
||||
expected: { provider: "google", model: "gemini-3.1-flash-lite-preview" },
|
||||
},
|
||||
{
|
||||
name: "keeps OpenAI codex refs on the openai provider",
|
||||
variants: ["openai/gpt-5.3-codex", "gpt-5.3-codex"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "openai", model: "gpt-5.3-codex" },
|
||||
},
|
||||
{
|
||||
name: "preserves openrouter native model prefixes",
|
||||
variants: ["openrouter/aurora-alpha"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "openrouter", model: "openrouter/aurora-alpha" },
|
||||
},
|
||||
{
|
||||
name: "passes through openrouter upstream provider ids",
|
||||
variants: ["openrouter/anthropic/claude-sonnet-4-5"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "openrouter", model: "anthropic/claude-sonnet-4-5" },
|
||||
},
|
||||
{
|
||||
name: "normalizes Vercel Claude shorthand to anthropic-prefixed model ids",
|
||||
variants: ["vercel-ai-gateway/claude-opus-4.6"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" },
|
||||
},
|
||||
{
|
||||
name: "normalizes Vercel Anthropic aliases without double-prefixing",
|
||||
variants: ["vercel-ai-gateway/opus-4.6"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4-6" },
|
||||
},
|
||||
{
|
||||
name: "keeps already-prefixed Vercel Anthropic models unchanged",
|
||||
variants: ["vercel-ai-gateway/anthropic/claude-opus-4.6"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" },
|
||||
},
|
||||
{
|
||||
name: "passes through non-Claude Vercel model ids unchanged",
|
||||
variants: ["vercel-ai-gateway/openai/gpt-5.2"],
|
||||
defaultProvider: "openai",
|
||||
expected: { provider: "vercel-ai-gateway", model: "openai/gpt-5.2" },
|
||||
},
|
||||
{
|
||||
name: "keeps already-suffixed codex variants unchanged",
|
||||
variants: ["openai/gpt-5.3-codex-codex"],
|
||||
defaultProvider: "anthropic",
|
||||
expected: { provider: "openai", model: "gpt-5.3-codex-codex" },
|
||||
},
|
||||
])("$name", ({ variants, defaultProvider, expected }) => {
|
||||
expectParsedModelVariants(variants, defaultProvider, expected);
|
||||
});
|
||||
|
||||
it("preserves nested model ids after provider prefix", () => {
|
||||
expect(parseModelRef("nvidia/moonshotai/kimi-k2.5", "anthropic")).toEqual({
|
||||
provider: "nvidia",
|
||||
model: "moonshotai/kimi-k2.5",
|
||||
});
|
||||
it("round-trips normalized refs through modelKey", () => {
|
||||
const parsed = parseModelRef(" opus-4.6 ", "anthropic");
|
||||
expect(parsed).toEqual({ provider: "anthropic", model: "claude-opus-4-6" });
|
||||
expect(modelKey(parsed?.provider ?? "", parsed?.model ?? "")).toBe(
|
||||
"anthropic/claude-opus-4-6",
|
||||
);
|
||||
});
|
||||
|
||||
it("normalizes anthropic alias refs to canonical model ids", () => {
|
||||
expect(parseModelRef("anthropic/opus-4.6", "openai")).toEqual({
|
||||
provider: "anthropic",
|
||||
model: "claude-opus-4-6",
|
||||
});
|
||||
expect(parseModelRef("opus-4.6", "anthropic")).toEqual({
|
||||
provider: "anthropic",
|
||||
model: "claude-opus-4-6",
|
||||
});
|
||||
expect(parseModelRef("anthropic/sonnet-4.6", "openai")).toEqual({
|
||||
provider: "anthropic",
|
||||
model: "claude-sonnet-4-6",
|
||||
});
|
||||
expect(parseModelRef("sonnet-4.6", "anthropic")).toEqual({
|
||||
provider: "anthropic",
|
||||
model: "claude-sonnet-4-6",
|
||||
});
|
||||
});
|
||||
|
||||
it("should use default provider if none specified", () => {
|
||||
expect(parseModelRef("claude-3-5-sonnet", "anthropic")).toEqual({
|
||||
provider: "anthropic",
|
||||
model: "claude-3-5-sonnet",
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes deprecated google flash preview ids to the working model id", () => {
|
||||
expect(parseModelRef("google/gemini-3.1-flash-preview", "openai")).toEqual({
|
||||
provider: "google",
|
||||
model: "gemini-3-flash-preview",
|
||||
});
|
||||
expect(parseModelRef("gemini-3.1-flash-preview", "google")).toEqual({
|
||||
provider: "google",
|
||||
model: "gemini-3-flash-preview",
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes gemini 3.1 flash-lite to the preview model id", () => {
|
||||
expect(parseModelRef("google/gemini-3.1-flash-lite", "openai")).toEqual({
|
||||
provider: "google",
|
||||
model: "gemini-3.1-flash-lite-preview",
|
||||
});
|
||||
expect(parseModelRef("gemini-3.1-flash-lite", "google")).toEqual({
|
||||
provider: "google",
|
||||
model: "gemini-3.1-flash-lite-preview",
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps openai gpt-5.3 codex refs on the openai provider", () => {
|
||||
expect(parseModelRef("openai/gpt-5.3-codex", "anthropic")).toEqual({
|
||||
provider: "openai",
|
||||
model: "gpt-5.3-codex",
|
||||
});
|
||||
expect(parseModelRef("gpt-5.3-codex", "openai")).toEqual({
|
||||
provider: "openai",
|
||||
model: "gpt-5.3-codex",
|
||||
});
|
||||
expect(parseModelRef("openai/gpt-5.3-codex-codex", "anthropic")).toEqual({
|
||||
provider: "openai",
|
||||
model: "gpt-5.3-codex-codex",
|
||||
});
|
||||
});
|
||||
|
||||
it("should return null for empty strings", () => {
|
||||
expect(parseModelRef("", "anthropic")).toBeNull();
|
||||
expect(parseModelRef(" ", "anthropic")).toBeNull();
|
||||
});
|
||||
|
||||
it("should preserve openrouter/ prefix for native models", () => {
|
||||
expect(parseModelRef("openrouter/aurora-alpha", "openai")).toEqual({
|
||||
provider: "openrouter",
|
||||
model: "openrouter/aurora-alpha",
|
||||
});
|
||||
});
|
||||
|
||||
it("should pass through openrouter external provider models as-is", () => {
|
||||
expect(parseModelRef("openrouter/anthropic/claude-sonnet-4-5", "openai")).toEqual({
|
||||
provider: "openrouter",
|
||||
model: "anthropic/claude-sonnet-4-5",
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes Vercel Claude shorthand to anthropic-prefixed model ids", () => {
|
||||
expect(parseModelRef("vercel-ai-gateway/claude-opus-4.6", "openai")).toEqual({
|
||||
provider: "vercel-ai-gateway",
|
||||
model: "anthropic/claude-opus-4.6",
|
||||
});
|
||||
expect(parseModelRef("vercel-ai-gateway/opus-4.6", "openai")).toEqual({
|
||||
provider: "vercel-ai-gateway",
|
||||
model: "anthropic/claude-opus-4-6",
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps already-prefixed Vercel Anthropic models unchanged", () => {
|
||||
expect(parseModelRef("vercel-ai-gateway/anthropic/claude-opus-4.6", "openai")).toEqual({
|
||||
provider: "vercel-ai-gateway",
|
||||
model: "anthropic/claude-opus-4.6",
|
||||
});
|
||||
});
|
||||
|
||||
it("passes through non-Claude Vercel model ids unchanged", () => {
|
||||
expect(parseModelRef("vercel-ai-gateway/openai/gpt-5.2", "openai")).toEqual({
|
||||
provider: "vercel-ai-gateway",
|
||||
model: "openai/gpt-5.2",
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle invalid slash usage", () => {
|
||||
expect(parseModelRef("/", "anthropic")).toBeNull();
|
||||
expect(parseModelRef("anthropic/", "anthropic")).toBeNull();
|
||||
expect(parseModelRef("/model", "anthropic")).toBeNull();
|
||||
it.each(["", " ", "/", "anthropic/", "/model"])("returns null for invalid ref %j", (raw) => {
|
||||
expect(parseModelRef(raw, "anthropic")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@ -113,6 +113,92 @@ function createMoonshotConfig(overrides: {
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
...(mergeMode ? { mode: "merge" as const } : {}),
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "gpt-4.1",
|
||||
name: "GPT-4.1",
|
||||
input: ["text"],
|
||||
reasoning: false,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 16384,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) {
|
||||
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
|
||||
await withTempHome(async () => {
|
||||
if (options?.seedMergedProvider) {
|
||||
await writeAgentModelsJson({
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
|
||||
api: "openai-completions",
|
||||
models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
|
||||
},
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
await ensureOpenClawModelsJson(
|
||||
createOpenAiConfigWithResolvedApiKey(options?.seedMergedProvider),
|
||||
);
|
||||
const result = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async function expectMoonshotTokenLimits(params: {
|
||||
contextWindow: number;
|
||||
maxTokens: number;
|
||||
expectedContextWindow: number;
|
||||
expectedMaxTokens: number;
|
||||
}) {
|
||||
await withTempHome(async () => {
|
||||
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
|
||||
await ensureOpenClawModelsJson(
|
||||
createMoonshotConfig({
|
||||
contextWindow: params.contextWindow,
|
||||
maxTokens: params.maxTokens,
|
||||
}),
|
||||
);
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
models?: Array<{
|
||||
id: string;
|
||||
contextWindow?: number;
|
||||
maxTokens?: number;
|
||||
}>;
|
||||
}
|
||||
>;
|
||||
}>();
|
||||
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
|
||||
expect(kimi?.contextWindow).toBe(params.expectedContextWindow);
|
||||
expect(kimi?.maxTokens).toBe(params.expectedMaxTokens);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
describe("models-config", () => {
|
||||
it("keeps anthropic api defaults when model entries omit api", async () => {
|
||||
await withTempHome(async () => {
|
||||
@ -444,131 +530,28 @@ describe("models-config", () => {
|
||||
});
|
||||
|
||||
it("does not persist resolved env var value as plaintext in models.json", async () => {
|
||||
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
|
||||
await withTempHome(async () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "gpt-4.1",
|
||||
name: "GPT-4.1",
|
||||
input: ["text"],
|
||||
reasoning: false,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 16384,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const result = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY");
|
||||
});
|
||||
});
|
||||
await expectOpenAiEnvMarkerApiKey();
|
||||
});
|
||||
|
||||
it("replaces stale merged apiKey when config key normalizes to a known env marker", async () => {
|
||||
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
|
||||
await withTempHome(async () => {
|
||||
await writeAgentModelsJson({
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
|
||||
api: "openai-completions",
|
||||
models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
|
||||
},
|
||||
},
|
||||
});
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
mode: "merge",
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "gpt-4.1",
|
||||
name: "GPT-4.1",
|
||||
input: ["text"],
|
||||
reasoning: false,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 16384,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const result = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
});
|
||||
});
|
||||
await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true });
|
||||
});
|
||||
|
||||
it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => {
|
||||
await withTempHome(async () => {
|
||||
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
|
||||
const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 });
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
models?: Array<{
|
||||
id: string;
|
||||
contextWindow?: number;
|
||||
maxTokens?: number;
|
||||
}>;
|
||||
}
|
||||
>;
|
||||
}>();
|
||||
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
|
||||
expect(kimi?.contextWindow).toBe(350000);
|
||||
expect(kimi?.maxTokens).toBe(16384);
|
||||
});
|
||||
await expectMoonshotTokenLimits({
|
||||
contextWindow: 350000,
|
||||
maxTokens: 16384,
|
||||
expectedContextWindow: 350000,
|
||||
expectedMaxTokens: 16384,
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back to implicit token limits when explicit values are invalid", async () => {
|
||||
await withTempHome(async () => {
|
||||
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
|
||||
const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 });
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
models?: Array<{
|
||||
id: string;
|
||||
contextWindow?: number;
|
||||
maxTokens?: number;
|
||||
}>;
|
||||
}
|
||||
>;
|
||||
}>();
|
||||
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
|
||||
expect(kimi?.contextWindow).toBe(256000);
|
||||
expect(kimi?.maxTokens).toBe(8192);
|
||||
});
|
||||
await expectMoonshotTokenLimits({
|
||||
contextWindow: 0,
|
||||
maxTokens: -1,
|
||||
expectedContextWindow: 256000,
|
||||
expectedMaxTokens: 8192,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -1,91 +1,82 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { ModelDefinitionConfig } from "../config/types.models.js";
|
||||
import { installModelsConfigTestHooks, withModelsTempHome } from "./models-config.e2e-harness.js";
|
||||
import { ensureOpenClawModelsJson } from "./models-config.js";
|
||||
import { readGeneratedModelsJson } from "./models-config.test-utils.js";
|
||||
|
||||
function createGoogleModelsConfig(models: ModelDefinitionConfig[]): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY", // pragma: allowlist secret
|
||||
api: "google-generative-ai",
|
||||
models,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function expectGeneratedGoogleModelIds(ids: string[]) {
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
}>();
|
||||
expect(parsed.providers.google?.models?.map((model) => model.id)).toEqual(ids);
|
||||
}
|
||||
|
||||
describe("models-config", () => {
|
||||
installModelsConfigTestHooks();
|
||||
|
||||
it("normalizes gemini 3 ids to preview for google providers", async () => {
|
||||
await withModelsTempHome(async () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY", // pragma: allowlist secret
|
||||
api: "google-generative-ai",
|
||||
models: [
|
||||
{
|
||||
id: "gemini-3-pro",
|
||||
name: "Gemini 3 Pro",
|
||||
api: "google-generative-ai",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
{
|
||||
id: "gemini-3-flash",
|
||||
name: "Gemini 3 Flash",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
const cfg = createGoogleModelsConfig([
|
||||
{
|
||||
id: "gemini-3-pro",
|
||||
name: "Gemini 3 Pro",
|
||||
api: "google-generative-ai",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
};
|
||||
{
|
||||
id: "gemini-3-flash",
|
||||
name: "Gemini 3 Flash",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
]);
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
}>();
|
||||
const ids = parsed.providers.google?.models?.map((model) => model.id);
|
||||
expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
|
||||
await expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes the deprecated google flash preview id to the working preview id", async () => {
|
||||
await withModelsTempHome(async () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY", // pragma: allowlist secret
|
||||
api: "google-generative-ai",
|
||||
models: [
|
||||
{
|
||||
id: "gemini-3.1-flash-preview",
|
||||
name: "Gemini 3.1 Flash Preview",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
const cfg = createGoogleModelsConfig([
|
||||
{
|
||||
id: "gemini-3.1-flash-preview",
|
||||
name: "Gemini 3.1 Flash Preview",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
};
|
||||
]);
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
}>();
|
||||
const ids = parsed.providers.google?.models?.map((model) => model.id);
|
||||
expect(ids).toEqual(["gemini-3-flash-preview"]);
|
||||
await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@ -16,47 +16,137 @@ import { readGeneratedModelsJson } from "./models-config.test-utils.js";
|
||||
|
||||
installModelsConfigTestHooks();
|
||||
|
||||
function createOpenAiApiKeySourceConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiApiKeyRuntimeConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiHeaderSourceConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: {
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
},
|
||||
"X-Tenant-Token": {
|
||||
source: "file",
|
||||
provider: "vault",
|
||||
id: "/providers/openai/tenantToken",
|
||||
},
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiHeaderRuntimeConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function withGatewayTokenMode(config: OpenClawConfig): OpenClawConfig {
|
||||
return {
|
||||
...config,
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function withGeneratedModelsFromRuntimeSource(
|
||||
params: {
|
||||
sourceConfig: OpenClawConfig;
|
||||
runtimeConfig: OpenClawConfig;
|
||||
candidateConfig?: OpenClawConfig;
|
||||
},
|
||||
runAssertions: () => Promise<void>,
|
||||
) {
|
||||
await withTempHome(async () => {
|
||||
try {
|
||||
setRuntimeConfigSnapshot(params.runtimeConfig, params.sourceConfig);
|
||||
await ensureOpenClawModelsJson(params.candidateConfig ?? loadConfig());
|
||||
await runAssertions();
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function expectGeneratedProviderApiKey(providerId: string, expected: string) {
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers[providerId]?.apiKey).toBe(expected);
|
||||
}
|
||||
|
||||
async function expectGeneratedOpenAiHeaderMarkers() {
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { headers?: Record<string, string> }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.headers?.Authorization).toBe(
|
||||
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
);
|
||||
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
|
||||
}
|
||||
|
||||
describe("models-config runtime source snapshot", () => {
|
||||
it("uses runtime source snapshot markers when passed the active runtime config", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(loadConfig());
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
await withGeneratedModelsFromRuntimeSource(
|
||||
{
|
||||
sourceConfig: createOpenAiApiKeySourceConfig(),
|
||||
runtimeConfig: createOpenAiApiKeyRuntimeConfig(),
|
||||
},
|
||||
async () => expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"), // pragma: allowlist secret
|
||||
);
|
||||
});
|
||||
|
||||
it("uses non-env marker from runtime source snapshot for file refs", async () => {
|
||||
@ -103,30 +193,8 @@ describe("models-config runtime source snapshot", () => {
|
||||
|
||||
it("projects cloned runtime configs onto source snapshot when preserving provider auth", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const sourceConfig = createOpenAiApiKeySourceConfig();
|
||||
const runtimeConfig = createOpenAiApiKeyRuntimeConfig();
|
||||
const clonedRuntimeConfig: OpenClawConfig = {
|
||||
...runtimeConfig,
|
||||
agents: {
|
||||
@ -139,11 +207,7 @@ describe("models-config runtime source snapshot", () => {
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(clonedRuntimeConfig);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
@ -152,121 +216,27 @@ describe("models-config runtime source snapshot", () => {
|
||||
});
|
||||
|
||||
it("uses header markers from runtime source snapshot instead of resolved runtime values", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: {
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
},
|
||||
"X-Tenant-Token": {
|
||||
source: "file",
|
||||
provider: "vault",
|
||||
id: "/providers/openai/tenantToken",
|
||||
},
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(loadConfig());
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { headers?: Record<string, string> }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.headers?.Authorization).toBe(
|
||||
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
);
|
||||
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
await withGeneratedModelsFromRuntimeSource(
|
||||
{
|
||||
sourceConfig: createOpenAiHeaderSourceConfig(),
|
||||
runtimeConfig: createOpenAiHeaderRuntimeConfig(),
|
||||
},
|
||||
expectGeneratedOpenAiHeaderMarkers,
|
||||
);
|
||||
});
|
||||
|
||||
it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const sourceConfig = withGatewayTokenMode(createOpenAiApiKeySourceConfig());
|
||||
const runtimeConfig = withGatewayTokenMode(createOpenAiApiKeyRuntimeConfig());
|
||||
const incompatibleCandidate: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
...createOpenAiApiKeyRuntimeConfig(),
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(incompatibleCandidate);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
@ -276,81 +246,16 @@ describe("models-config runtime source snapshot", () => {
|
||||
|
||||
it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: {
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
},
|
||||
"X-Tenant-Token": {
|
||||
source: "file",
|
||||
provider: "vault",
|
||||
id: "/providers/openai/tenantToken",
|
||||
},
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const sourceConfig = withGatewayTokenMode(createOpenAiHeaderSourceConfig());
|
||||
const runtimeConfig = withGatewayTokenMode(createOpenAiHeaderRuntimeConfig());
|
||||
const incompatibleCandidate: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
...createOpenAiHeaderRuntimeConfig(),
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(incompatibleCandidate);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { headers?: Record<string, string> }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.headers?.Authorization).toBe(
|
||||
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
);
|
||||
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
|
||||
await expectGeneratedOpenAiHeaderMarkers();
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
|
||||
@ -1,31 +1,11 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js";
|
||||
import {
|
||||
enrichOllamaModelsWithContext,
|
||||
resolveOllamaApiBase,
|
||||
type OllamaTagModel,
|
||||
} from "./ollama-models.js";
|
||||
|
||||
function jsonResponse(body: unknown, status = 200): Response {
|
||||
return new Response(JSON.stringify(body), {
|
||||
status,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
|
||||
function requestUrl(input: string | URL | Request): string {
|
||||
if (typeof input === "string") {
|
||||
return input;
|
||||
}
|
||||
if (input instanceof URL) {
|
||||
return input.toString();
|
||||
}
|
||||
return input.url;
|
||||
}
|
||||
|
||||
function requestBody(body: BodyInit | null | undefined): string {
|
||||
return typeof body === "string" ? body : "{}";
|
||||
}
|
||||
|
||||
describe("ollama-models", () => {
|
||||
afterEach(() => {
|
||||
vi.unstubAllGlobals();
|
||||
@ -43,7 +23,7 @@ describe("ollama-models", () => {
|
||||
if (!url.endsWith("/api/show")) {
|
||||
throw new Error(`Unexpected fetch: ${url}`);
|
||||
}
|
||||
const body = JSON.parse(requestBody(init?.body)) as { name?: string };
|
||||
const body = JSON.parse(requestBodyText(init?.body)) as { name?: string };
|
||||
if (body.name === "llama3:8b") {
|
||||
return jsonResponse({ model_info: { "llama.context_length": 65536 } });
|
||||
}
|
||||
|
||||
@ -106,7 +106,7 @@ describe("buildAssistantMessage", () => {
|
||||
expect(result.usage.totalTokens).toBe(15);
|
||||
});
|
||||
|
||||
it("falls back to thinking when content is empty", () => {
|
||||
it("drops thinking-only output when content is empty", () => {
|
||||
const response = {
|
||||
model: "qwen3:32b",
|
||||
created_at: "2026-01-01T00:00:00Z",
|
||||
@ -119,10 +119,10 @@ describe("buildAssistantMessage", () => {
|
||||
};
|
||||
const result = buildAssistantMessage(response, modelInfo);
|
||||
expect(result.stopReason).toBe("stop");
|
||||
expect(result.content).toEqual([{ type: "text", text: "Thinking output" }]);
|
||||
expect(result.content).toEqual([]);
|
||||
});
|
||||
|
||||
it("falls back to reasoning when content and thinking are empty", () => {
|
||||
it("drops reasoning-only output when content and thinking are empty", () => {
|
||||
const response = {
|
||||
model: "qwen3:32b",
|
||||
created_at: "2026-01-01T00:00:00Z",
|
||||
@ -135,7 +135,7 @@ describe("buildAssistantMessage", () => {
|
||||
};
|
||||
const result = buildAssistantMessage(response, modelInfo);
|
||||
expect(result.stopReason).toBe("stop");
|
||||
expect(result.content).toEqual([{ type: "text", text: "Reasoning output" }]);
|
||||
expect(result.content).toEqual([]);
|
||||
});
|
||||
|
||||
it("builds response with tool calls", () => {
|
||||
@ -203,6 +203,20 @@ function mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader<Uint8Arr
|
||||
} as unknown as ReadableStreamDefaultReader<Uint8Array>;
|
||||
}
|
||||
|
||||
async function expectDoneEventContent(lines: string[], expectedContent: unknown) {
|
||||
await withMockNdjsonFetch(lines, async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual(expectedContent);
|
||||
});
|
||||
}
|
||||
|
||||
describe("parseNdjsonStream", () => {
|
||||
it("parses text-only streaming chunks", async () => {
|
||||
const reader = mockNdjsonReader([
|
||||
@ -485,89 +499,49 @@ describe("createOllamaStreamFn", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("accumulates thinking chunks when content is empty", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
it("drops thinking chunks when no final content is emitted", async () => {
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]);
|
||||
},
|
||||
[],
|
||||
);
|
||||
});
|
||||
|
||||
it("prefers streamed content over earlier thinking chunks", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
|
||||
},
|
||||
[{ type: "text", text: "final answer" }],
|
||||
);
|
||||
});
|
||||
|
||||
it("accumulates reasoning chunks when thinking is absent", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
it("drops reasoning chunks when no final content is emitted", async () => {
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]);
|
||||
},
|
||||
[],
|
||||
);
|
||||
});
|
||||
|
||||
it("prefers streamed content over earlier reasoning chunks", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
|
||||
},
|
||||
[{ type: "text", text: "final answer" }],
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@ -340,10 +340,9 @@ export function buildAssistantMessage(
|
||||
): AssistantMessage {
|
||||
const content: (TextContent | ToolCall)[] = [];
|
||||
|
||||
// Ollama-native reasoning models may emit their answer in `thinking` or
|
||||
// `reasoning` with an empty `content`. Fall back so replies are not dropped.
|
||||
const text =
|
||||
response.message.content || response.message.thinking || response.message.reasoning || "";
|
||||
// Native Ollama reasoning fields are internal model output. The reply text
|
||||
// must come from `content`; reasoning visibility is controlled elsewhere.
|
||||
const text = response.message.content || "";
|
||||
if (text) {
|
||||
content.push({ type: "text", text });
|
||||
}
|
||||
@ -497,20 +496,12 @@ export function createOllamaStreamFn(
|
||||
|
||||
const reader = response.body.getReader();
|
||||
let accumulatedContent = "";
|
||||
let fallbackContent = "";
|
||||
let sawContent = false;
|
||||
const accumulatedToolCalls: OllamaToolCall[] = [];
|
||||
let finalResponse: OllamaChatResponse | undefined;
|
||||
|
||||
for await (const chunk of parseNdjsonStream(reader)) {
|
||||
if (chunk.message?.content) {
|
||||
sawContent = true;
|
||||
accumulatedContent += chunk.message.content;
|
||||
} else if (!sawContent && chunk.message?.thinking) {
|
||||
fallbackContent += chunk.message.thinking;
|
||||
} else if (!sawContent && chunk.message?.reasoning) {
|
||||
// Backward compatibility for older/native variants that still use reasoning.
|
||||
fallbackContent += chunk.message.reasoning;
|
||||
}
|
||||
|
||||
// Ollama sends tool_calls in intermediate (done:false) chunks,
|
||||
@ -529,7 +520,7 @@ export function createOllamaStreamFn(
|
||||
throw new Error("Ollama API stream ended without a final response");
|
||||
}
|
||||
|
||||
finalResponse.message.content = accumulatedContent || fallbackContent;
|
||||
finalResponse.message.content = accumulatedContent;
|
||||
if (accumulatedToolCalls.length > 0) {
|
||||
finalResponse.message.tool_calls = accumulatedToolCalls;
|
||||
}
|
||||
|
||||
@ -115,6 +115,50 @@ function resetSessionStore(store: Record<string, unknown>) {
|
||||
mockConfig = createMockConfig();
|
||||
}
|
||||
|
||||
function installSandboxedSessionStatusConfig() {
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function mockSpawnedSessionList(
|
||||
resolveSessions: (spawnedBy: string | undefined) => Array<Record<string, unknown>>,
|
||||
) {
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return { sessions: resolveSessions(request.params?.spawnedBy as string | undefined) };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
}
|
||||
|
||||
function expectSpawnedSessionLookupCalls(spawnedBy: string) {
|
||||
const expectedCall = {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy,
|
||||
},
|
||||
};
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, expectedCall);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, expectedCall);
|
||||
}
|
||||
|
||||
function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) {
|
||||
const tool = createOpenClawTools({
|
||||
agentSessionKey,
|
||||
@ -242,27 +286,8 @@ describe("session_status tool", () => {
|
||||
updatedAt: 10,
|
||||
},
|
||||
});
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return { sessions: [] };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
installSandboxedSessionStatusConfig();
|
||||
mockSpawnedSessionList(() => []);
|
||||
|
||||
const tool = getSessionStatusTool("agent:main:subagent:child", {
|
||||
sandboxed: true,
|
||||
@ -284,25 +309,7 @@ describe("session_status tool", () => {
|
||||
|
||||
expect(loadSessionStoreMock).not.toHaveBeenCalled();
|
||||
expect(updateSessionStoreMock).not.toHaveBeenCalled();
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "agent:main:subagent:child",
|
||||
},
|
||||
});
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "agent:main:subagent:child",
|
||||
},
|
||||
});
|
||||
expectSpawnedSessionLookupCalls("agent:main:subagent:child");
|
||||
});
|
||||
|
||||
it("keeps legacy main requester keys for sandboxed session tree checks", async () => {
|
||||
@ -316,30 +323,10 @@ describe("session_status tool", () => {
|
||||
updatedAt: 20,
|
||||
},
|
||||
});
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return {
|
||||
sessions:
|
||||
request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
|
||||
};
|
||||
}
|
||||
return {};
|
||||
});
|
||||
installSandboxedSessionStatusConfig();
|
||||
mockSpawnedSessionList((spawnedBy) =>
|
||||
spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
|
||||
);
|
||||
|
||||
const tool = getSessionStatusTool("main", {
|
||||
sandboxed: true,
|
||||
@ -357,25 +344,7 @@ describe("session_status tool", () => {
|
||||
expect(childDetails.ok).toBe(true);
|
||||
expect(childDetails.sessionKey).toBe("agent:main:subagent:child");
|
||||
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "main",
|
||||
},
|
||||
});
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "main",
|
||||
},
|
||||
});
|
||||
expectSpawnedSessionLookupCalls("main");
|
||||
});
|
||||
|
||||
it("scopes bare session keys to the requester agent", async () => {
|
||||
|
||||
@ -17,6 +17,63 @@ function writeStore(storePath: string, store: Record<string, unknown>) {
|
||||
fs.writeFileSync(storePath, JSON.stringify(store, null, 2), "utf-8");
|
||||
}
|
||||
|
||||
function seedLeafOwnedChildSession(storePath: string, leafKey = "agent:main:subagent:leaf") {
|
||||
const childKey = `${leafKey}:subagent:child`;
|
||||
writeStore(storePath, {
|
||||
[leafKey]: {
|
||||
sessionId: "leaf-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: "agent:main:main",
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
[childKey]: {
|
||||
sessionId: "child-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: leafKey,
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
});
|
||||
|
||||
addSubagentRunForTests({
|
||||
runId: "run-child",
|
||||
childSessionKey: childKey,
|
||||
controllerSessionKey: leafKey,
|
||||
requesterSessionKey: leafKey,
|
||||
requesterDisplayKey: leafKey,
|
||||
task: "impossible child",
|
||||
cleanup: "keep",
|
||||
createdAt: Date.now() - 30_000,
|
||||
startedAt: Date.now() - 30_000,
|
||||
});
|
||||
|
||||
return {
|
||||
childKey,
|
||||
tool: createSubagentsTool({ agentSessionKey: leafKey }),
|
||||
};
|
||||
}
|
||||
|
||||
async function expectLeafSubagentControlForbidden(params: {
|
||||
storePath: string;
|
||||
action: "kill" | "steer";
|
||||
callId: string;
|
||||
message?: string;
|
||||
}) {
|
||||
const { childKey, tool } = seedLeafOwnedChildSession(params.storePath);
|
||||
const result = await tool.execute(params.callId, {
|
||||
action: params.action,
|
||||
target: childKey,
|
||||
...(params.message ? { message: params.message } : {}),
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
error: "Leaf subagents cannot control other sessions.",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
}
|
||||
|
||||
describe("openclaw-tools: subagents scope isolation", () => {
|
||||
let storePath = "";
|
||||
|
||||
@ -151,95 +208,19 @@ describe("openclaw-tools: subagents scope isolation", () => {
|
||||
});
|
||||
|
||||
it("leaf subagents cannot kill even explicitly-owned child sessions", async () => {
|
||||
const leafKey = "agent:main:subagent:leaf";
|
||||
const childKey = `${leafKey}:subagent:child`;
|
||||
|
||||
writeStore(storePath, {
|
||||
[leafKey]: {
|
||||
sessionId: "leaf-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: "agent:main:main",
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
[childKey]: {
|
||||
sessionId: "child-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: leafKey,
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
});
|
||||
|
||||
addSubagentRunForTests({
|
||||
runId: "run-child",
|
||||
childSessionKey: childKey,
|
||||
controllerSessionKey: leafKey,
|
||||
requesterSessionKey: leafKey,
|
||||
requesterDisplayKey: leafKey,
|
||||
task: "impossible child",
|
||||
cleanup: "keep",
|
||||
createdAt: Date.now() - 30_000,
|
||||
startedAt: Date.now() - 30_000,
|
||||
});
|
||||
|
||||
const tool = createSubagentsTool({ agentSessionKey: leafKey });
|
||||
const result = await tool.execute("call-leaf-kill", {
|
||||
await expectLeafSubagentControlForbidden({
|
||||
storePath,
|
||||
action: "kill",
|
||||
target: childKey,
|
||||
callId: "call-leaf-kill",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
error: "Leaf subagents cannot control other sessions.",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("leaf subagents cannot steer even explicitly-owned child sessions", async () => {
|
||||
const leafKey = "agent:main:subagent:leaf";
|
||||
const childKey = `${leafKey}:subagent:child`;
|
||||
|
||||
writeStore(storePath, {
|
||||
[leafKey]: {
|
||||
sessionId: "leaf-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: "agent:main:main",
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
[childKey]: {
|
||||
sessionId: "child-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: leafKey,
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
});
|
||||
|
||||
addSubagentRunForTests({
|
||||
runId: "run-child",
|
||||
childSessionKey: childKey,
|
||||
controllerSessionKey: leafKey,
|
||||
requesterSessionKey: leafKey,
|
||||
requesterDisplayKey: leafKey,
|
||||
task: "impossible child",
|
||||
cleanup: "keep",
|
||||
createdAt: Date.now() - 30_000,
|
||||
startedAt: Date.now() - 30_000,
|
||||
});
|
||||
|
||||
const tool = createSubagentsTool({ agentSessionKey: leafKey });
|
||||
const result = await tool.execute("call-leaf-steer", {
|
||||
await expectLeafSubagentControlForbidden({
|
||||
storePath,
|
||||
action: "steer",
|
||||
target: childKey,
|
||||
callId: "call-leaf-steer",
|
||||
message: "continue",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
error: "Leaf subagents cannot control other sessions.",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@ -174,15 +174,18 @@ export function createOpenClawTools(
|
||||
createSessionsListTool({
|
||||
agentSessionKey: options?.agentSessionKey,
|
||||
sandboxed: options?.sandboxed,
|
||||
config: options?.config,
|
||||
}),
|
||||
createSessionsHistoryTool({
|
||||
agentSessionKey: options?.agentSessionKey,
|
||||
sandboxed: options?.sandboxed,
|
||||
config: options?.config,
|
||||
}),
|
||||
createSessionsSendTool({
|
||||
agentSessionKey: options?.agentSessionKey,
|
||||
agentChannel: options?.agentChannel,
|
||||
sandboxed: options?.sandboxed,
|
||||
config: options?.config,
|
||||
}),
|
||||
createSessionsYieldTool({
|
||||
sessionId: options?.sessionId,
|
||||
|
||||
@ -45,98 +45,117 @@ const GROQ_TOO_MANY_REQUESTS_MESSAGE =
|
||||
const GROQ_SERVICE_UNAVAILABLE_MESSAGE =
|
||||
"503 Service Unavailable: The server is temporarily unable to handle the request due to overloading or maintenance."; // pragma: allowlist secret
|
||||
|
||||
function expectMessageMatches(
|
||||
matcher: (message: string) => boolean,
|
||||
samples: readonly string[],
|
||||
expected: boolean,
|
||||
) {
|
||||
for (const sample of samples) {
|
||||
expect(matcher(sample), sample).toBe(expected);
|
||||
}
|
||||
}
|
||||
|
||||
describe("isAuthPermanentErrorMessage", () => {
|
||||
it("matches permanent auth failure patterns", () => {
|
||||
const samples = [
|
||||
"invalid_api_key",
|
||||
"api key revoked",
|
||||
"api key deactivated",
|
||||
"key has been disabled",
|
||||
"key has been revoked",
|
||||
"account has been deactivated",
|
||||
"could not authenticate api key",
|
||||
"could not validate credentials",
|
||||
"API_KEY_REVOKED",
|
||||
"api_key_deleted",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isAuthPermanentErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("does not match transient auth errors", () => {
|
||||
const samples = [
|
||||
"unauthorized",
|
||||
"invalid token",
|
||||
"authentication failed",
|
||||
"forbidden",
|
||||
"access denied",
|
||||
"token has expired",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isAuthPermanentErrorMessage(sample)).toBe(false);
|
||||
}
|
||||
it.each([
|
||||
{
|
||||
name: "matches permanent auth failure patterns",
|
||||
samples: [
|
||||
"invalid_api_key",
|
||||
"api key revoked",
|
||||
"api key deactivated",
|
||||
"key has been disabled",
|
||||
"key has been revoked",
|
||||
"account has been deactivated",
|
||||
"could not authenticate api key",
|
||||
"could not validate credentials",
|
||||
"API_KEY_REVOKED",
|
||||
"api_key_deleted",
|
||||
],
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "does not match transient auth errors",
|
||||
samples: [
|
||||
"unauthorized",
|
||||
"invalid token",
|
||||
"authentication failed",
|
||||
"forbidden",
|
||||
"access denied",
|
||||
"token has expired",
|
||||
],
|
||||
expected: false,
|
||||
},
|
||||
])("$name", ({ samples, expected }) => {
|
||||
expectMessageMatches(isAuthPermanentErrorMessage, samples, expected);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isAuthErrorMessage", () => {
|
||||
it("matches credential validation errors", () => {
|
||||
const samples = [
|
||||
'No credentials found for profile "anthropic:default".',
|
||||
"No API key found for profile openai.",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isAuthErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("matches OAuth refresh failures", () => {
|
||||
const samples = [
|
||||
"OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. Please try again or re-authenticate.",
|
||||
"Please re-authenticate to continue.",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isAuthErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
it.each([
|
||||
'No credentials found for profile "anthropic:default".',
|
||||
"No API key found for profile openai.",
|
||||
"OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. Please try again or re-authenticate.",
|
||||
"Please re-authenticate to continue.",
|
||||
])("matches auth errors for %j", (sample) => {
|
||||
expect(isAuthErrorMessage(sample)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isBillingErrorMessage", () => {
|
||||
it("matches credit / payment failures", () => {
|
||||
const samples = [
|
||||
"Your credit balance is too low to access the Anthropic API.",
|
||||
"insufficient credits",
|
||||
"Payment Required",
|
||||
"HTTP 402 Payment Required",
|
||||
"plans & billing",
|
||||
// Venice returns "Insufficient USD or Diem balance" which has extra words
|
||||
// between "insufficient" and "balance"
|
||||
"Insufficient USD or Diem balance to complete request. Visit https://venice.ai/settings/api to add credits.",
|
||||
// OpenRouter returns "requires more credits" for underfunded accounts
|
||||
"This model requires more credits to use",
|
||||
"This endpoint require more credits",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isBillingErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("does not false-positive on issue IDs or text containing 402", () => {
|
||||
const falsePositives = [
|
||||
"Fixed issue CHE-402 in the latest release",
|
||||
"See ticket #402 for details",
|
||||
"ISSUE-402 has been resolved",
|
||||
"Room 402 is available",
|
||||
"Error code 403 was returned, not 402-related",
|
||||
"The building at 402 Main Street",
|
||||
"processed 402 records",
|
||||
"402 items found in the database",
|
||||
"port 402 is open",
|
||||
"Use a 402 stainless bolt",
|
||||
"Book a 402 room",
|
||||
"There is a 402 near me",
|
||||
];
|
||||
for (const sample of falsePositives) {
|
||||
expect(isBillingErrorMessage(sample)).toBe(false);
|
||||
}
|
||||
it.each([
|
||||
{
|
||||
name: "matches credit and payment failures",
|
||||
samples: [
|
||||
"Your credit balance is too low to access the Anthropic API.",
|
||||
"insufficient credits",
|
||||
"Payment Required",
|
||||
"HTTP 402 Payment Required",
|
||||
"plans & billing",
|
||||
"Insufficient USD or Diem balance to complete request. Visit https://venice.ai/settings/api to add credits.",
|
||||
"This model requires more credits to use",
|
||||
"This endpoint require more credits",
|
||||
],
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "does not false-positive on issue ids and numeric references",
|
||||
samples: [
|
||||
"Fixed issue CHE-402 in the latest release",
|
||||
"See ticket #402 for details",
|
||||
"ISSUE-402 has been resolved",
|
||||
"Room 402 is available",
|
||||
"Error code 403 was returned, not 402-related",
|
||||
"The building at 402 Main Street",
|
||||
"processed 402 records",
|
||||
"402 items found in the database",
|
||||
"port 402 is open",
|
||||
"Use a 402 stainless bolt",
|
||||
"Book a 402 room",
|
||||
"There is a 402 near me",
|
||||
],
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "still matches real HTTP 402 billing errors",
|
||||
samples: [
|
||||
"HTTP 402 Payment Required",
|
||||
"status: 402",
|
||||
"error code 402",
|
||||
"http 402",
|
||||
"status=402 payment required",
|
||||
"got a 402 from the API",
|
||||
"returned 402",
|
||||
"received a 402 response",
|
||||
'{"status":402,"type":"error"}',
|
||||
'{"code":402,"message":"payment required"}',
|
||||
'{"error":{"code":402,"message":"billing hard limit reached"}}',
|
||||
],
|
||||
expected: true,
|
||||
},
|
||||
])("$name", ({ samples, expected }) => {
|
||||
expectMessageMatches(isBillingErrorMessage, samples, expected);
|
||||
});
|
||||
|
||||
it("does not false-positive on long assistant responses mentioning billing keywords", () => {
|
||||
// Simulate a multi-paragraph assistant response that mentions billing terms
|
||||
const longResponse =
|
||||
@ -176,37 +195,27 @@ describe("isBillingErrorMessage", () => {
|
||||
expect(longNonError.length).toBeGreaterThan(512);
|
||||
expect(isBillingErrorMessage(longNonError)).toBe(false);
|
||||
});
|
||||
it("still matches real HTTP 402 billing errors", () => {
|
||||
const realErrors = [
|
||||
"HTTP 402 Payment Required",
|
||||
"status: 402",
|
||||
"error code 402",
|
||||
"http 402",
|
||||
"status=402 payment required",
|
||||
"got a 402 from the API",
|
||||
"returned 402",
|
||||
"received a 402 response",
|
||||
'{"status":402,"type":"error"}',
|
||||
'{"code":402,"message":"payment required"}',
|
||||
'{"error":{"code":402,"message":"billing hard limit reached"}}',
|
||||
];
|
||||
for (const sample of realErrors) {
|
||||
expect(isBillingErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
|
||||
it("prefers billing when API-key and 402 hints both appear", () => {
|
||||
const sample =
|
||||
"402 Payment Required: The account associated with this API key has reached its maximum allowed monthly spending limit.";
|
||||
expect(isBillingErrorMessage(sample)).toBe(true);
|
||||
expect(classifyFailoverReason(sample)).toBe("billing");
|
||||
});
|
||||
});
|
||||
|
||||
describe("isCloudCodeAssistFormatError", () => {
|
||||
it("matches format errors", () => {
|
||||
const samples = [
|
||||
"INVALID_REQUEST_ERROR: string should match pattern",
|
||||
"messages.1.content.1.tool_use.id",
|
||||
"tool_use.id should match pattern",
|
||||
"invalid request format",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isCloudCodeAssistFormatError(sample)).toBe(true);
|
||||
}
|
||||
expectMessageMatches(
|
||||
isCloudCodeAssistFormatError,
|
||||
[
|
||||
"INVALID_REQUEST_ERROR: string should match pattern",
|
||||
"messages.1.content.1.tool_use.id",
|
||||
"tool_use.id should match pattern",
|
||||
"invalid request format",
|
||||
],
|
||||
true,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@ -238,20 +247,24 @@ describe("isCloudflareOrHtmlErrorPage", () => {
|
||||
});
|
||||
|
||||
describe("isCompactionFailureError", () => {
|
||||
it("matches compaction overflow failures", () => {
|
||||
const samples = [
|
||||
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
|
||||
"auto-compaction failed due to context overflow",
|
||||
"Compaction failed: prompt is too long",
|
||||
"Summarization failed: context window exceeded for this request",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isCompactionFailureError(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("ignores non-compaction overflow errors", () => {
|
||||
expect(isCompactionFailureError("Context overflow: prompt too large")).toBe(false);
|
||||
expect(isCompactionFailureError("rate limit exceeded")).toBe(false);
|
||||
it.each([
|
||||
{
|
||||
name: "matches compaction overflow failures",
|
||||
samples: [
|
||||
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
|
||||
"auto-compaction failed due to context overflow",
|
||||
"Compaction failed: prompt is too long",
|
||||
"Summarization failed: context window exceeded for this request",
|
||||
],
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "ignores non-compaction overflow errors",
|
||||
samples: ["Context overflow: prompt too large", "rate limit exceeded"],
|
||||
expected: false,
|
||||
},
|
||||
])("$name", ({ samples, expected }) => {
|
||||
expectMessageMatches(isCompactionFailureError, samples, expected);
|
||||
});
|
||||
});
|
||||
|
||||
@ -506,6 +519,10 @@ describe("isTransientHttpError", () => {
|
||||
});
|
||||
|
||||
describe("classifyFailoverReasonFromHttpStatus", () => {
|
||||
it("treats HTTP 401 permanent auth failures as auth_permanent", () => {
|
||||
expect(classifyFailoverReasonFromHttpStatus(401, "invalid_api_key")).toBe("auth_permanent");
|
||||
});
|
||||
|
||||
it("treats HTTP 422 as format error", () => {
|
||||
expect(classifyFailoverReasonFromHttpStatus(422)).toBe("format");
|
||||
expect(classifyFailoverReasonFromHttpStatus(422, "check open ai req parameter error")).toBe(
|
||||
@ -518,6 +535,10 @@ describe("classifyFailoverReasonFromHttpStatus", () => {
|
||||
expect(classifyFailoverReasonFromHttpStatus(422, "insufficient credits")).toBe("billing");
|
||||
});
|
||||
|
||||
it("treats HTTP 400 insufficient-quota payloads as billing instead of format", () => {
|
||||
expect(classifyFailoverReasonFromHttpStatus(400, INSUFFICIENT_QUOTA_PAYLOAD)).toBe("billing");
|
||||
});
|
||||
|
||||
it("treats HTTP 499 as transient for structured errors", () => {
|
||||
expect(classifyFailoverReasonFromHttpStatus(499)).toBe("timeout");
|
||||
expect(classifyFailoverReasonFromHttpStatus(499, "499 Client Closed Request")).toBe("timeout");
|
||||
|
||||
@ -1,9 +1,14 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import "./test-helpers/fast-coding-tools.js";
|
||||
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import {
|
||||
cleanupEmbeddedPiRunnerTestWorkspace,
|
||||
createEmbeddedPiRunnerOpenAiConfig,
|
||||
createEmbeddedPiRunnerTestWorkspace,
|
||||
type EmbeddedPiRunnerTestWorkspace,
|
||||
immediateEnqueue,
|
||||
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
|
||||
|
||||
function createMockUsage(input: number, output: number) {
|
||||
return {
|
||||
@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
|
||||
|
||||
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
|
||||
let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager;
|
||||
let tempRoot: string | undefined;
|
||||
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
|
||||
let agentDir: string;
|
||||
let workspaceDir: string;
|
||||
let sessionCounter = 0;
|
||||
@ -98,50 +103,21 @@ beforeAll(async () => {
|
||||
vi.useRealTimers();
|
||||
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
|
||||
({ SessionManager } = await import("@mariozechner/pi-coding-agent"));
|
||||
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-"));
|
||||
agentDir = path.join(tempRoot, "agent");
|
||||
workspaceDir = path.join(tempRoot, "workspace");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-");
|
||||
({ agentDir, workspaceDir } = e2eWorkspace);
|
||||
}, 180_000);
|
||||
|
||||
afterAll(async () => {
|
||||
if (!tempRoot) {
|
||||
return;
|
||||
}
|
||||
await fs.rm(tempRoot, { recursive: true, force: true });
|
||||
tempRoot = undefined;
|
||||
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
|
||||
e2eWorkspace = undefined;
|
||||
});
|
||||
|
||||
const makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies OpenClawConfig;
|
||||
|
||||
const nextSessionFile = () => {
|
||||
sessionCounter += 1;
|
||||
return path.join(workspaceDir, `session-${sessionCounter}.jsonl`);
|
||||
};
|
||||
const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`;
|
||||
const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`;
|
||||
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
|
||||
|
||||
const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => {
|
||||
const sessionFile = nextSessionFile();
|
||||
@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-1"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]);
|
||||
return await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey,
|
||||
@ -197,7 +173,7 @@ const readSessionMessages = async (sessionFile: string) => {
|
||||
};
|
||||
|
||||
const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => {
|
||||
const cfg = makeOpenAiConfig(["mock-error"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey,
|
||||
@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi
|
||||
describe("runEmbeddedPiAgent", () => {
|
||||
it("handles prompt error paths without dropping user state", async () => {
|
||||
const sessionFile = nextSessionFile();
|
||||
const cfg = makeOpenAiConfig(["mock-error"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
|
||||
const sessionKey = nextSessionKey();
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
|
||||
@ -8,12 +8,17 @@
|
||||
* Follows the same pattern as pi-embedded-runner.e2e.test.ts.
|
||||
*/
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import "./test-helpers/fast-coding-tools.js";
|
||||
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js";
|
||||
import {
|
||||
cleanupEmbeddedPiRunnerTestWorkspace,
|
||||
createEmbeddedPiRunnerOpenAiConfig,
|
||||
createEmbeddedPiRunnerTestWorkspace,
|
||||
type EmbeddedPiRunnerTestWorkspace,
|
||||
immediateEnqueue,
|
||||
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
|
||||
|
||||
function createMockUsage(input: number, output: number) {
|
||||
return {
|
||||
@ -126,7 +131,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
|
||||
});
|
||||
|
||||
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
|
||||
let tempRoot: string | undefined;
|
||||
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
|
||||
let agentDir: string;
|
||||
let workspaceDir: string;
|
||||
|
||||
@ -136,45 +141,15 @@ beforeAll(async () => {
|
||||
responsePlan = [];
|
||||
observedContexts = [];
|
||||
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
|
||||
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-yield-e2e-"));
|
||||
agentDir = path.join(tempRoot, "agent");
|
||||
workspaceDir = path.join(tempRoot, "workspace");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-");
|
||||
({ agentDir, workspaceDir } = e2eWorkspace);
|
||||
}, 180_000);
|
||||
|
||||
afterAll(async () => {
|
||||
if (!tempRoot) {
|
||||
return;
|
||||
}
|
||||
await fs.rm(tempRoot, { recursive: true, force: true });
|
||||
tempRoot = undefined;
|
||||
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
|
||||
e2eWorkspace = undefined;
|
||||
});
|
||||
|
||||
const makeConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies OpenClawConfig;
|
||||
|
||||
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
|
||||
|
||||
const readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
@ -205,7 +180,7 @@ describe("sessions_yield e2e", () => {
|
||||
|
||||
const sessionId = "yield-e2e-parent";
|
||||
const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl");
|
||||
const cfg = makeConfig(["mock-yield"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]);
|
||||
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId,
|
||||
@ -304,7 +279,7 @@ describe("sessions_yield e2e", () => {
|
||||
|
||||
const sessionId = "yield-e2e-abort";
|
||||
const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl");
|
||||
const cfg = makeConfig(["mock-yield-abort"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]);
|
||||
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId,
|
||||
|
||||
@ -7,6 +7,7 @@ import {
|
||||
usesOpenAiStringModeAnthropicToolChoice,
|
||||
} from "../provider-capabilities.js";
|
||||
import { log } from "./logger.js";
|
||||
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
|
||||
|
||||
const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07";
|
||||
const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const;
|
||||
@ -341,18 +342,10 @@ export function createAnthropicFastModeWrapper(
|
||||
return underlying(model, context, options);
|
||||
}
|
||||
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
if (payload && typeof payload === "object") {
|
||||
const payloadObj = payload as Record<string, unknown>;
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
}
|
||||
return originalOnPayload?.(payload, model);
|
||||
},
|
||||
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
@ -278,6 +278,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({
|
||||
}));
|
||||
|
||||
vi.mock("../../utils/message-channel.js", () => ({
|
||||
INTERNAL_MESSAGE_CHANNEL: "webchat",
|
||||
normalizeMessageChannel: vi.fn(() => undefined),
|
||||
}));
|
||||
|
||||
@ -375,6 +376,16 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
|
||||
unregisterApiProviders(getCustomApiRegistrySourceId("ollama"));
|
||||
});
|
||||
|
||||
async function runDirectCompaction(customInstructions = "focus on decisions") {
|
||||
return await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
customInstructions,
|
||||
});
|
||||
}
|
||||
|
||||
it("bootstraps runtime plugins with the resolved workspace", async () => {
|
||||
await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
@ -472,13 +483,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
|
||||
hookRunner.hasHooks.mockReturnValue(true);
|
||||
sanitizeSessionHistoryMock.mockResolvedValue([]);
|
||||
|
||||
const result = await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
customInstructions: "focus on decisions",
|
||||
});
|
||||
const result = await runDirectCompaction();
|
||||
|
||||
expect(result.ok).toBe(true);
|
||||
const beforeContext = sessionHook("compact:before")?.context;
|
||||
@ -528,13 +533,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
|
||||
details: { ok: true },
|
||||
});
|
||||
|
||||
const result = await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
customInstructions: "focus on decisions",
|
||||
});
|
||||
const result = await runDirectCompaction();
|
||||
|
||||
expect(result).toMatchObject({
|
||||
ok: true,
|
||||
|
||||
@ -2,6 +2,7 @@ import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
import type { SimpleStreamOptions } from "@mariozechner/pi-ai";
|
||||
import { streamSimple } from "@mariozechner/pi-ai";
|
||||
import { log } from "./logger.js";
|
||||
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
|
||||
|
||||
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
|
||||
type OpenAIReasoningEffort = "low" | "medium" | "high";
|
||||
@ -325,18 +326,10 @@ export function createOpenAIServiceTierWrapper(
|
||||
) {
|
||||
return underlying(model, context, options);
|
||||
}
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
if (payload && typeof payload === "object") {
|
||||
const payloadObj = payload as Record<string, unknown>;
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
}
|
||||
return originalOnPayload?.(payload, model);
|
||||
},
|
||||
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
@ -249,6 +249,72 @@ function createSubscriptionMock() {
|
||||
};
|
||||
}
|
||||
|
||||
function resetEmbeddedAttemptHarness(
|
||||
params: {
|
||||
includeSpawnSubagent?: boolean;
|
||||
subscribeImpl?: () => ReturnType<typeof createSubscriptionMock>;
|
||||
sessionMessages?: AgentMessage[];
|
||||
} = {},
|
||||
) {
|
||||
if (params.includeSpawnSubagent) {
|
||||
hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({
|
||||
status: "accepted",
|
||||
childSessionKey: "agent:main:subagent:child",
|
||||
runId: "run-child",
|
||||
});
|
||||
}
|
||||
hoisted.createAgentSessionMock.mockReset();
|
||||
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
|
||||
hoisted.resolveSandboxContextMock.mockReset();
|
||||
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
|
||||
release: async () => {},
|
||||
});
|
||||
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
|
||||
hoisted.sessionManager.branch.mockReset();
|
||||
hoisted.sessionManager.resetLeaf.mockReset();
|
||||
hoisted.sessionManager.buildSessionContext
|
||||
.mockReset()
|
||||
.mockReturnValue({ messages: params.sessionMessages ?? [] });
|
||||
hoisted.sessionManager.appendCustomEntry.mockReset();
|
||||
if (params.subscribeImpl) {
|
||||
hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(params.subscribeImpl);
|
||||
}
|
||||
}
|
||||
|
||||
async function cleanupTempPaths(tempPaths: string[]) {
|
||||
while (tempPaths.length > 0) {
|
||||
const target = tempPaths.pop();
|
||||
if (target) {
|
||||
await fs.rm(target, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function createDefaultEmbeddedSession(): MutableSession {
|
||||
const session: MutableSession = {
|
||||
sessionId: "embedded-session",
|
||||
messages: [],
|
||||
isCompacting: false,
|
||||
isStreaming: false,
|
||||
agent: {
|
||||
replaceMessages: (messages: unknown[]) => {
|
||||
session.messages = [...messages];
|
||||
},
|
||||
},
|
||||
prompt: async () => {
|
||||
session.messages = [
|
||||
...session.messages,
|
||||
{ role: "assistant", content: "done", timestamp: 2 },
|
||||
];
|
||||
},
|
||||
abort: async () => {},
|
||||
dispose: () => {},
|
||||
steer: async () => {},
|
||||
};
|
||||
|
||||
return session;
|
||||
}
|
||||
|
||||
const testModel = {
|
||||
api: "openai-completions",
|
||||
provider: "openai",
|
||||
@ -269,32 +335,14 @@ describe("runEmbeddedAttempt sessions_spawn workspace inheritance", () => {
|
||||
const tempPaths: string[] = [];
|
||||
|
||||
beforeEach(() => {
|
||||
hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({
|
||||
status: "accepted",
|
||||
childSessionKey: "agent:main:subagent:child",
|
||||
runId: "run-child",
|
||||
resetEmbeddedAttemptHarness({
|
||||
includeSpawnSubagent: true,
|
||||
subscribeImpl: createSubscriptionMock,
|
||||
});
|
||||
hoisted.createAgentSessionMock.mockReset();
|
||||
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
|
||||
hoisted.resolveSandboxContextMock.mockReset();
|
||||
hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(createSubscriptionMock);
|
||||
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
|
||||
release: async () => {},
|
||||
});
|
||||
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
|
||||
hoisted.sessionManager.branch.mockReset();
|
||||
hoisted.sessionManager.resetLeaf.mockReset();
|
||||
hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] });
|
||||
hoisted.sessionManager.appendCustomEntry.mockReset();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
while (tempPaths.length > 0) {
|
||||
const target = tempPaths.pop();
|
||||
if (target) {
|
||||
await fs.rm(target, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
await cleanupTempPaths(tempPaths);
|
||||
});
|
||||
|
||||
it("passes the real workspace to sessions_spawn when workspaceAccess is ro", async () => {
|
||||
@ -394,26 +442,11 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => {
|
||||
const tempPaths: string[] = [];
|
||||
|
||||
beforeEach(() => {
|
||||
hoisted.createAgentSessionMock.mockReset();
|
||||
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
|
||||
hoisted.resolveSandboxContextMock.mockReset();
|
||||
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
|
||||
release: async () => {},
|
||||
});
|
||||
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
|
||||
hoisted.sessionManager.branch.mockReset();
|
||||
hoisted.sessionManager.resetLeaf.mockReset();
|
||||
hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] });
|
||||
hoisted.sessionManager.appendCustomEntry.mockReset();
|
||||
resetEmbeddedAttemptHarness();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
while (tempPaths.length > 0) {
|
||||
const target = tempPaths.pop();
|
||||
if (target) {
|
||||
await fs.rm(target, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
await cleanupTempPaths(tempPaths);
|
||||
});
|
||||
|
||||
async function runAttemptWithCacheTtl(compactionCount: number) {
|
||||
@ -428,30 +461,9 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => {
|
||||
getCompactionCount: () => compactionCount,
|
||||
}));
|
||||
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => {
|
||||
const session: MutableSession = {
|
||||
sessionId: "embedded-session",
|
||||
messages: [],
|
||||
isCompacting: false,
|
||||
isStreaming: false,
|
||||
agent: {
|
||||
replaceMessages: (messages: unknown[]) => {
|
||||
session.messages = [...messages];
|
||||
},
|
||||
},
|
||||
prompt: async () => {
|
||||
session.messages = [
|
||||
...session.messages,
|
||||
{ role: "assistant", content: "done", timestamp: 2 },
|
||||
];
|
||||
},
|
||||
abort: async () => {},
|
||||
dispose: () => {},
|
||||
steer: async () => {},
|
||||
};
|
||||
|
||||
return { session };
|
||||
});
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => ({
|
||||
session: createDefaultEmbeddedSession(),
|
||||
}));
|
||||
|
||||
return await runEmbeddedAttempt({
|
||||
sessionId: "embedded-session",
|
||||
@ -591,30 +603,9 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => {
|
||||
.mockReset()
|
||||
.mockReturnValue({ messages: seedMessages });
|
||||
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => {
|
||||
const session: MutableSession = {
|
||||
sessionId: "embedded-session",
|
||||
messages: [],
|
||||
isCompacting: false,
|
||||
isStreaming: false,
|
||||
agent: {
|
||||
replaceMessages: (messages: unknown[]) => {
|
||||
session.messages = [...messages];
|
||||
},
|
||||
},
|
||||
prompt: async () => {
|
||||
session.messages = [
|
||||
...session.messages,
|
||||
{ role: "assistant", content: "done", timestamp: 2 },
|
||||
];
|
||||
},
|
||||
abort: async () => {},
|
||||
dispose: () => {},
|
||||
steer: async () => {},
|
||||
};
|
||||
|
||||
return { session };
|
||||
});
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => ({
|
||||
session: createDefaultEmbeddedSession(),
|
||||
}));
|
||||
|
||||
return await runEmbeddedAttempt({
|
||||
sessionId: "embedded-session",
|
||||
|
||||
20
src/agents/pi-embedded-runner/stream-payload-utils.ts
Normal file
20
src/agents/pi-embedded-runner/stream-payload-utils.ts
Normal file
@ -0,0 +1,20 @@
|
||||
import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
|
||||
export function streamWithPayloadPatch(
|
||||
underlying: StreamFn,
|
||||
model: Parameters<StreamFn>[0],
|
||||
context: Parameters<StreamFn>[1],
|
||||
options: Parameters<StreamFn>[2],
|
||||
patchPayload: (payload: Record<string, unknown>) => void,
|
||||
) {
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
if (payload && typeof payload === "object") {
|
||||
patchPayload(payload as Record<string, unknown>);
|
||||
}
|
||||
return originalOnPayload?.(payload, model);
|
||||
},
|
||||
});
|
||||
}
|
||||
@ -1,22 +1,13 @@
|
||||
import { spawnSync } from "node:child_process";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempDir } from "../../test-helpers/temp-dir.js";
|
||||
import {
|
||||
buildPinnedWritePlan,
|
||||
SANDBOX_PINNED_MUTATION_PYTHON,
|
||||
} from "./fs-bridge-mutation-helper.js";
|
||||
|
||||
async function withTempRoot<T>(prefix: string, run: (root: string) => Promise<T>): Promise<T> {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
try {
|
||||
return await run(root);
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
function runMutation(args: string[], input?: string) {
|
||||
return spawnSync("python3", ["-c", SANDBOX_PINNED_MUTATION_PYTHON, ...args], {
|
||||
input,
|
||||
@ -56,7 +47,7 @@ function runWritePlan(args: string[], input?: string) {
|
||||
|
||||
describe("sandbox pinned mutation helper", () => {
|
||||
it("writes through a pinned directory fd", async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
|
||||
@ -72,7 +63,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"preserves stdin payload bytes when the pinned write plan runs through sh",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
|
||||
@ -92,7 +83,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"rejects symlink-parent writes instead of materializing a temp file outside the mount",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@ -108,7 +99,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
);
|
||||
|
||||
it.runIf(process.platform !== "win32")("rejects symlink segments during mkdirp", async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@ -123,7 +114,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
});
|
||||
|
||||
it.runIf(process.platform !== "win32")("remove unlinks the symlink itself", async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@ -144,7 +135,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"rejects symlink destination parents during rename",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@ -175,7 +166,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"copies directories across different mount roots during rename fallback",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const sourceRoot = path.join(root, "source");
|
||||
const destRoot = path.join(root, "dest");
|
||||
await fs.mkdir(path.join(sourceRoot, "dir", "nested"), { recursive: true });
|
||||
|
||||
57
src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts
Normal file
57
src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts
Normal file
@ -0,0 +1,57 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
|
||||
export type EmbeddedPiRunnerTestWorkspace = {
|
||||
tempRoot: string;
|
||||
agentDir: string;
|
||||
workspaceDir: string;
|
||||
};
|
||||
|
||||
export async function createEmbeddedPiRunnerTestWorkspace(
|
||||
prefix: string,
|
||||
): Promise<EmbeddedPiRunnerTestWorkspace> {
|
||||
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
const agentDir = path.join(tempRoot, "agent");
|
||||
const workspaceDir = path.join(tempRoot, "workspace");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
return { tempRoot, agentDir, workspaceDir };
|
||||
}
|
||||
|
||||
export async function cleanupEmbeddedPiRunnerTestWorkspace(
|
||||
workspace: EmbeddedPiRunnerTestWorkspace | undefined,
|
||||
): Promise<void> {
|
||||
if (!workspace) {
|
||||
return;
|
||||
}
|
||||
await fs.rm(workspace.tempRoot, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export async function immediateEnqueue<T>(task: () => Promise<T>): Promise<T> {
|
||||
return await task();
|
||||
}
|
||||
@ -1,5 +1,5 @@
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { loadConfig } from "../../config/config.js";
|
||||
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
|
||||
import { callGateway } from "../../gateway/call.js";
|
||||
import { capArrayByJsonBytes } from "../../gateway/session-utils.fs.js";
|
||||
import { jsonUtf8Bytes } from "../../infra/json-utf8-bytes.js";
|
||||
@ -169,6 +169,7 @@ function enforceSessionsHistoryHardCap(params: {
|
||||
export function createSessionsHistoryTool(opts?: {
|
||||
agentSessionKey?: string;
|
||||
sandboxed?: boolean;
|
||||
config?: OpenClawConfig;
|
||||
}): AnyAgentTool {
|
||||
return {
|
||||
label: "Session History",
|
||||
@ -180,7 +181,7 @@ export function createSessionsHistoryTool(opts?: {
|
||||
const sessionKeyParam = readStringParam(params, "sessionKey", {
|
||||
required: true,
|
||||
});
|
||||
const cfg = loadConfig();
|
||||
const cfg = opts?.config ?? loadConfig();
|
||||
const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } =
|
||||
resolveSandboxedSessionToolContext({
|
||||
cfg,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user